source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
test_threads.py | import sys
try:
import threading
HAVE_THREADING = True
except ImportError:
import dummy_threading as threading
HAVE_THREADING = False
VERBOSE = False
#VERBOSE = True
import mpi4pyve.rc
mpi4pyve.rc.thread_level = 'multiple'
from mpi4pyve import MPI
import mpiunittest as unittest
pypy3_lt_50 = (hasattr(sys, 'pypy_version_info') and
sys.version_info[0] == 3 and
sys.pypy_version_info < (5, 0))
class TestMPIThreads(unittest.TestCase):
    """Checks MPI thread-support levels and main-thread identification."""

    def testThreadLevels(self):
        """The four MPI thread levels must be strictly increasing."""
        ordered = [MPI.THREAD_SINGLE,
                   MPI.THREAD_FUNNELED,
                   MPI.THREAD_SERIALIZED,
                   MPI.THREAD_MULTIPLE]
        for lower, higher in zip(ordered, ordered[1:]):
            self.assertTrue(lower < higher)
        try:
            level = MPI.Query_thread()
        except NotImplementedError:
            self.skipTest('mpi-query_thread')
        else:
            self.assertTrue(level in ordered)

    def testIsThreadMain(self):
        """MPI.Is_thread_main() must agree with threading's notion of main."""
        try:
            is_main = MPI.Is_thread_main()
        except NotImplementedError:
            self.skipTest('mpi-is_thread_main')
        thread_name = threading.current_thread().name
        expect_main = (thread_name == 'MainThread') or not HAVE_THREADING
        self.assertEqual(is_main, expect_main)
        if VERBOSE:
            sys.stderr.write("%s: MPI.Is_thread_main() -> %s" % (thread_name, is_main) + '\n')

    @unittest.skipIf(pypy3_lt_50, 'pypy3(<5.0)')
    @unittest.skip('necmpi')
    def testIsThreadMainInThread(self):
        """Run the main-thread check from worker threads (skipped on NEC MPI)."""
        try:
            level = MPI.Query_thread()
        except NotImplementedError:
            self.skipTest('mpi-query_thread')
        self.testIsThreadMain()
        workers = [threading.Thread(target=self.testIsThreadMain)
                   for _ in range(5)]
        if level == MPI.THREAD_MULTIPLE:
            # Any thread may call MPI concurrently: start all, then join all.
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()
        elif level == MPI.THREAD_SERIALIZED:
            # Only one thread at a time may call MPI: serialize start/join.
            for worker in workers:
                worker.start()
                worker.join()
        else:
            self.skipTest('mpi-thread_level')
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
test_db_thread_safe.py | import os
import random
import time
from threading import Thread
import pytest
from graphility.database_thread_safe import ThreadSafeDatabase
from .hash_tests import HashIndexTests
from .shared import DB_Tests, WithAIndex
from .tree_tests import TreeIndexTests
class Test_Database(DB_Tests):
    # Run the generic database test suite against the thread-safe backend.
    _db = ThreadSafeDatabase


class Test_HashIndex(HashIndexTests):
    # Hash-index tests backed by the thread-safe database.
    _db = ThreadSafeDatabase


class Test_TreeIndex(TreeIndexTests):
    # Tree-index tests backed by the thread-safe database.
    _db = ThreadSafeDatabase
class Test_Threads:
    """Concurrency tests: many threads inserting/updating through ThreadSafeDatabase."""

    _db = ThreadSafeDatabase

    def test_one(self, tmpdir):
        """100 threads each insert one record; every value must land in the index."""
        db = self._db(os.path.join(str(tmpdir), "db"))
        db.create()
        db.add_index(WithAIndex(db.path, "with_a"))
        ths = []
        for x in range(1, 101):
            ths.append(Thread(target=db.insert, args=(dict(a=x),)))
        for th in ths:
            th.start()
        for th in ths:
            th.join()
        assert db.count(db.all, "with_a") == 100
        expected = list(range(1, 101))
        for curr in db.all("with_a", with_doc=True):
            print(curr)
            a = curr["doc"]["a"]
            # remove() raises ValueError on a duplicate or unexpected value.
            expected.remove(a)
        assert expected == []

    @pytest.mark.parametrize(
        ("threads_num",), [(x,) for x in (3, 10, 20, 50, 100, 250)]
    )
    def test_conc_update(self, tmpdir, threads_num):
        """Many threads update the same record; write conflicts are retried."""
        db = self._db(os.path.join(str(tmpdir), "db"))
        db.create()
        db.add_index(WithAIndex(db.path, "with_a"))
        db.insert(dict(a=1))

        def updater():
            i = 0
            # BUG FIX: was `random.random() // 100` - floor division of a
            # float < 1 always yields 0.0, so there was no jitter at all.
            time.sleep(random.random() / 100)
            while True:
                rec = list(db.all("id", limit=1))
                doc = rec[0].copy()
                doc["a"] += 1
                try:
                    db.update(doc)
                except Exception:  # conflict with a concurrent writer: retry
                    i += 1
                    if i > 100:
                        return False
                    time.sleep(random.random() / 100)
                else:
                    return True

        ths = []
        for x in range(threads_num):  # python threads... beware!!!
            ths.append(Thread(target=updater))
        for th in ths:
            th.start()
        for th in ths:
            th.join()
        # All updates hit the single record; no extra rows may appear.
        assert db.count(db.all, "with_a", with_doc=True) == 1
        assert db.count(db.all, "id") == 1
|
videoplayer.py | import time
import threading
import xbmc
import xbmcgui
import kodigui
import windowutils
import opener
import busy
import search
import dropdown
from lib import util
from lib import player
from lib import colors
from lib.util import T
PASSOUT_PROTECTION_DURATION_SECONDS = 7200
PASSOUT_LAST_VIDEO_DURATION_MILLIS = 1200000
class VideoPlayerWindow(kodigui.ControlledWindow, windowutils.UtilMixin):
    """Kodi window that wraps video playback and the post-play screen."""

    xmlFile = 'script-plex-video_player.xml'
    path = util.ADDON.getAddonInfo('path')
    theme = 'Main'
    res = '1080i'
    width = 1920
    height = 1080

    # (width, height) thumbnail dimensions for the post-play hubs.
    NEXT_DIM = (537, 303)
    PREV_DIM = (462, 259)
    ONDECK_DIM = (329, 185)
    RELATED_DIM = (268, 397)
    ROLES_DIM = (268, 268)

    # Control IDs defined in the window XML.
    OPTIONS_GROUP_ID = 200
    PREV_BUTTON_ID = 101
    NEXT_BUTTON_ID = 102
    ONDECK_LIST_ID = 400
    RELATED_LIST_ID = 401
    ROLES_LIST_ID = 403
    HOME_BUTTON_ID = 201
    SEARCH_BUTTON_ID = 202
    PLAYER_STATUS_BUTTON_ID = 204

    def __init__(self, *args, **kwargs):
        # Either a play_queue or a single video is supplied by the caller.
        kodigui.ControlledWindow.__init__(self, *args, **kwargs)
        windowutils.UtilMixin.__init__(self)
        self.playQueue = kwargs.get('play_queue')
        self.video = kwargs.get('video')
        self.resume = bool(kwargs.get('resume'))
        self.postPlayMode = False
        self.prev = None
        self.playlist = None
        self.handler = None
        self.next = None
        self.videos = None
        self.trailer = None
        self.aborted = True
        self.timeout = None  # post-play countdown deadline (epoch seconds)
        self.passoutProtection = 0

    def doClose(self):
        # Stop any running countdown and notify the player session, then close.
        util.DEBUG_LOG('VideoPlayerWindow: Closing')
        self.timeout = None
        kodigui.ControlledWindow.doClose(self)
        player.PLAYER.handler.sessionEnded()

    def onFirstInit(self):
        # Wire player events and list controls, then start playback.
        player.PLAYER.on('session.ended', self.sessionEnded)
        player.PLAYER.on('post.play', self.postPlay)
        player.PLAYER.on('change.background', self.changeBackground)
        self.onDeckListControl = kodigui.ManagedControlList(self, self.ONDECK_LIST_ID, 5)
        self.relatedListControl = kodigui.ManagedControlList(self, self.RELATED_LIST_ID, 5)
        self.rolesListControl = kodigui.ManagedControlList(self, self.ROLES_LIST_ID, 5)
        util.DEBUG_LOG('VideoPlayerWindow: Starting session (ID: {0})'.format(id(self)))
        self.resetPassoutProtection()
        self.play(resume=self.resume)
    def onReInit(self):
        # Called when the window regains focus; refresh the backdrop.
        self.setBackground()

    def onAction(self, action):
        """Handle remote/keyboard actions; extra navigation in post-play mode."""
        try:
            if self.postPlayMode:
                # Any interaction resets the auto-advance countdown.
                self.cancelTimer()
                self.resetPassoutProtection()
                if action in (xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_CONTEXT_MENU):
                    # First back/context press moves focus to the options group.
                    if not xbmc.getCondVisibility('ControlGroup({0}).HasFocus(0)'.format(self.OPTIONS_GROUP_ID)):
                        self.setFocusId(self.OPTIONS_GROUP_ID)
                        return
                if action in (xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_PREVIOUS_MENU):
                    self.doClose()
                    return
                if action in (xbmcgui.ACTION_NEXT_ITEM, xbmcgui.ACTION_PLAYER_PLAY):
                    self.playVideo()
                elif action == xbmcgui.ACTION_PREV_ITEM:
                    self.playVideo(prev=True)
                elif action == xbmcgui.ACTION_STOP:
                    self.doClose()
        except:
            util.ERROR()
        kodigui.ControlledWindow.onAction(self, action)

    def onClick(self, controlID):
        # Clicks are only meaningful on the post-play screen.
        if not self.postPlayMode:
            return
        self.cancelTimer()
        if controlID == self.HOME_BUTTON_ID:
            self.goHome()
        elif controlID == self.ONDECK_LIST_ID:
            self.openItem(self.onDeckListControl)
        elif controlID == self.RELATED_LIST_ID:
            self.openItem(self.relatedListControl)
        elif controlID == self.ROLES_LIST_ID:
            self.roleClicked()
        elif controlID == self.PREV_BUTTON_ID:
            self.playVideo(prev=True)
        elif controlID == self.NEXT_BUTTON_ID:
            self.playVideo()
        elif controlID == self.PLAYER_STATUS_BUTTON_ID:
            self.showAudioPlayer()
        elif controlID == self.SEARCH_BUTTON_ID:
            self.searchButtonClicked()

    def onFocus(self, controlID):
        if not self.postPlayMode:
            return
        # Hub lists use IDs 400-499; expose which hub has focus to the skin.
        if 399 < controlID < 500:
            self.setProperty('hub.focus', str(controlID - 400))
        else:
            self.setProperty('hub.focus', '')
        if xbmc.getCondVisibility('Control.HasFocus(101) | Control.HasFocus(102) | ControlGroup(200).HasFocus(0)'):
            self.setProperty('on.extras', '')
        elif xbmc.getCondVisibility('ControlGroup(60).HasFocus(0)'):
            self.setProperty('on.extras', '1')
    def searchButtonClicked(self):
        # Open search scoped to the previous video's library section if known.
        self.processCommand(search.dialog(self, section_id=self.prev.getLibrarySectionId() or None))

    def roleClicked(self):
        """Open the selected actor, via a dropdown when in multiple sections."""
        mli = self.rolesListControl.getSelectedItem()
        if not mli:
            return
        sectionRoles = busy.widthDialog(mli.dataSource.sectionRoles, '')
        if not sectionRoles:
            util.DEBUG_LOG('No sections found for actor')
            return
        if len(sectionRoles) > 1:
            # Multiple sections: let the user pick one from a dropdown.
            x, y = self.getRoleItemDDPosition()
            options = [{'role': r, 'display': r.reasonTitle} for r in sectionRoles]
            choice = dropdown.showDropdown(options, (x, y), pos_is_bottom=True, close_direction='bottom')
            if not choice:
                return
            role = choice['role']
        else:
            role = sectionRoles[0]
        self.processCommand(opener.open(role))

    def getRoleItemDDPosition(self):
        """Compute the (x, y) anchor for the role dropdown.

        The offsets mirror the skin layout: visible hubs (500/501) push the
        anchor down, focus on the extras area pulls it back up.
        """
        y = 1000
        if xbmc.getCondVisibility('Control.IsVisible(500)'):
            y += 360
        if xbmc.getCondVisibility('Control.IsVisible(501)'):
            y += 520
        if xbmc.getCondVisibility('!String.IsEmpty(Window.Property(on.extras))'):
            y -= 300
        if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),0) + Control.IsVisible(500)'):
            y -= 500
        if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),1) + Control.IsVisible(501)'):
            y -= 500
        focus = int(xbmc.getInfoLabel('Container(403).Position'))
        x = ((focus + 1) * 304) - 100
        return x, y
    def setBackground(self):
        # Use the explicit video if set, else the current play-queue item.
        video = self.video if self.video else self.playQueue.current()
        self.setProperty('background', video.defaultArt.asTranscodedImageURL(1920, 1080, opacity=60, background=colors.noAlpha.Background))

    def changeBackground(self, url, **kwargs):
        # Player event: swap the backdrop to the supplied artwork URL.
        self.setProperty('background', url)

    def sessionEnded(self, session_id=None, **kwargs):
        # Only react to the end of *our* session; sessions are keyed by id(self).
        if session_id != id(self):
            util.DEBUG_LOG('VideoPlayerWindow: Ignoring session end (ID: {0} - SessionID: {1})'.format(id(self), session_id))
            return
        util.DEBUG_LOG('VideoPlayerWindow: Session ended - closing (ID: {0})'.format(id(self)))
        self.doClose()

    def play(self, resume=False, handler=None):
        """Start playback of the queue or single video.

        NOTE(review): the `resume` parameter is accepted but ignored - both
        branches below pass self.resume instead. Confirm whether callers that
        pass resume= expect it to take effect.
        """
        self.hidePostPlay()
        self.setBackground()
        if self.playQueue:
            player.PLAYER.playVideoPlaylist(self.playQueue, resume=self.resume, session_id=id(self), handler=handler)
        elif self.video:
            player.PLAYER.playVideo(self.video, resume=self.resume, force_update=True, session_id=id(self), handler=handler)

    def openItem(self, control=None, item=None):
        # Open either the given item or the current selection of the given list.
        if not item:
            mli = control.getSelectedItem()
            if not mli:
                return
            item = mli.dataSource
        self.processCommand(opener.open(item))
    def showPostPlay(self):
        # Switch the skin into post-play mode.
        self.postPlayMode = True
        self.setProperty('post.play', '1')

    def hidePostPlay(self):
        # Leave post-play mode and clear everything it displayed.
        self.postPlayMode = False
        self.setProperty('post.play', '')
        self.setProperties((
            'post.play.background',
            'info.title',
            'info.duration',
            'info.summary',
            'info.date',
            'next.thumb',
            'next.title',
            'next.subtitle',
            'prev.thumb',
            'prev.title',
            'prev.subtitle',
            'related.header',
            'has.next'
        ), '')
        self.onDeckListControl.reset()
        self.relatedListControl.reset()
        self.rolesListControl.reset()
    @busy.dialog()
    def postPlay(self, video=None, playlist=None, handler=None, **kwargs):
        """Player event: build and display the post-play screen."""
        util.DEBUG_LOG('VideoPlayer: Starting post-play')
        self.showPostPlay()
        self.prev = video
        self.playlist = playlist
        self.handler = handler
        self.getHubs()
        self.setProperty(
            'thumb.fallback', 'script.plexodus/thumb_fallbacks/{0}.png'.format(self.prev.type in ('show', 'season', 'episode') and 'show' or 'movie')
        )
        util.DEBUG_LOG('PostPlay: Showing video info')
        if self.next:
            self.next.reload(includeRelated=1, includeRelatedCount=10, includeExtras=1, includeExtrasCount=10)
        self.setInfo()
        # NOTE(review): fillOnDeck()'s return value is discarded and
        # fillRelated() is called without has_prev - confirm the hub divider
        # logic is intended to ignore the on-deck hub here.
        self.fillOnDeck()
        hasPrev = self.fillRelated()
        self.fillRoles(hasPrev)
        self.startTimer()
        if self.next:
            self.setFocusId(self.NEXT_BUTTON_ID)
        else:
            self.setFocusId(self.PREV_BUTTON_ID)
def resetPassoutProtection(self):
self.passoutProtection = time.time() + PASSOUT_PROTECTION_DURATION_SECONDS
def startTimer(self):
if not util.getSetting('post_play_auto', True):
util.DEBUG_LOG('Post play auto-play disabled')
return
if not self.next:
return
if time.time() > self.passoutProtection and self.prev.duration.asInt() > PASSOUT_LAST_VIDEO_DURATION_MILLIS:
util.DEBUG_LOG('Post play auto-play skipped: Passout protection')
return
else:
millis = (self.passoutProtection - time.time()) * 1000
util.DEBUG_LOG('Post play auto-play: Passout protection in {0}'.format(util.durationToShortText(millis)))
util.DEBUG_LOG('Staring post-play timer')
self.timeout = time.time() + 16
threading.Thread(target=self.countdown).start()
def cancelTimer(self):
if self.timeout is not None:
util.DEBUG_LOG('Canceling post-play timer')
self.timeout = None
self.setProperty('countdown', '')
def countdown(self):
while self.timeout and not util.MONITOR.waitForAbort(0.1):
now = time.time()
if self.timeout and now > self.timeout:
self.timeout = None
self.setProperty('countdown', '')
util.DEBUG_LOG('Post-play timer finished')
# This works. The direct method caused the OSD to be broken, possibly because it was triggered from another thread?
# That was the only real difference I could see between the direct method and the user actually clicking the button.
xbmc.executebuiltin('SendClick(,{0})'.format(self.NEXT_BUTTON_ID))
# Direct method, causes issues with OSD
# self.playVideo()
break
elif self.timeout is not None:
self.setProperty('countdown', str(min(15, int((self.timeout or now) - now))))
    def getHubs(self):
        """Fetch post-play hubs and determine the next video to offer."""
        try:
            self.hubs = self.prev.postPlay()
        except:
            # NOTE(review): if postPlay() raises, self.hubs is never assigned
            # and later reads of self.hubs will fail - confirm whether this
            # should abort instead of continuing.
            util.ERROR()
        self.next = None
        if self.playlist:
            # Playing from a playlist: the next item is whatever is current now.
            if self.prev != self.playlist.current():
                self.next = self.playlist.current()
        else:
            # Single episode: use the last item of the 'up next' hub.
            if self.prev.type == 'episode' and 'tv.upnext' in self.hubs:
                self.next = self.hubs['tv.upnext'].items[-1]
        if self.next:
            self.setProperty('has.next', '1')

    def setInfo(self):
        """Populate the post-play skin properties for next/previous videos."""
        if self.next:
            self.setProperty(
                'post.play.background',
                self.next.art.asTranscodedImageURL(self.width, self.height, blur=128, opacity=60, background=colors.noAlpha.Background)
            )
            self.setProperty('info.title', self.next.title)
            self.setProperty('info.duration', util.durationToText(self.next.duration.asInt()))
            self.setProperty('info.summary', self.next.summary)
        if self.prev:
            # Note: overwrites the background set for self.next above.
            self.setProperty(
                'post.play.background',
                self.prev.art.asTranscodedImageURL(self.width, self.height, blur=128, opacity=60, background=colors.noAlpha.Background)
            )
            self.setProperty('prev.info.title', self.prev.title)
            self.setProperty('prev.info.duration', util.durationToText(self.prev.duration.asInt()))
            self.setProperty('prev.info.summary', self.prev.summary)
        if self.prev.type == 'episode':
            # Episode layout: grandparent (show) titles and SxEy subtitles.
            if self.next:
                self.setProperty('next.thumb', self.next.thumb.asTranscodedImageURL(*self.NEXT_DIM))
                self.setProperty('related.header', T(32306, 'Related Shows'))
                self.setProperty('info.date', util.cleanLeadingZeros(self.next.originallyAvailableAt.asDatetime('%B %d, %Y')))
                self.setProperty('next.title', self.next.grandparentTitle)
                self.setProperty(
                    'next.subtitle', u'{0} {1} \u2022 {2} {3}'.format(T(32303, 'Season'), self.next.parentIndex, T(32304, 'Episode'), self.next.index)
                )
            if self.prev:
                self.setProperty('prev.thumb', self.prev.thumb.asTranscodedImageURL(*self.PREV_DIM))
                self.setProperty('prev.title', self.prev.grandparentTitle)
                self.setProperty(
                    'prev.subtitle', u'{0} {1} \u2022 {2} {3}'.format(T(32303, 'Season'), self.prev.parentIndex, T(32304, 'Episode'), self.prev.index)
                )
                self.setProperty('prev.info.date', util.cleanLeadingZeros(self.prev.originallyAvailableAt.asDatetime('%B %d, %Y')))
        elif self.prev.type == 'movie':
            # Movie layout: plain titles with release years.
            if self.next:
                self.setProperty('next.thumb', self.next.defaultArt.asTranscodedImageURL(*self.NEXT_DIM))
                self.setProperty('related.header', T(32404, 'Related Movies'))
                self.setProperty('info.date', self.next.year)
                self.setProperty('next.title', self.next.title)
                self.setProperty('next.subtitle', self.next.year)
            if self.prev:
                self.setProperty('prev.thumb', self.prev.defaultArt.asTranscodedImageURL(*self.PREV_DIM))
                self.setProperty('prev.title', self.prev.title)
                self.setProperty('prev.subtitle', self.prev.year)
                self.setProperty('prev.info.date', self.prev.year)
def fillOnDeck(self):
items = []
idx = 0
onDeckHub = self.hubs.get('tv.ondeck', self.hubs.get('movie.similar'))
if not onDeckHub:
self.onDeckListControl.reset()
return False
for ondeck in onDeckHub.items:
title = ondeck.grandparentTitle or ondeck.title
if ondeck.type == 'episode':
thumb = ondeck.thumb.asTranscodedImageURL(*self.ONDECK_DIM)
else:
thumb = ondeck.defaultArt.asTranscodedImageURL(*self.ONDECK_DIM)
mli = kodigui.ManagedListItem(title or '', thumbnailImage=thumb, data_source=ondeck)
if mli:
mli.setProperty('index', str(idx))
mli.setProperty(
'thumb.fallback', 'script.plexodus/thumb_fallbacks/{0}.png'.format(ondeck.type in ('show', 'season', 'episode') and 'show' or 'movie')
)
if ondeck.type in 'episode':
mli.setLabel2(u'{0}{1} \u2022 {2}{3}'.format(T(32310, 'S'), ondeck.parentIndex, T(32311, 'E'), ondeck.index))
else:
mli.setLabel2(ondeck.year)
items.append(mli)
idx += 1
if not items:
return False
self.onDeckListControl.reset()
self.onDeckListControl.addItems(items)
return True
    def fillRelated(self, has_prev=False):
        """Populate the related-videos hub; returns True if items were added."""
        items = []
        idx = 0
        # Prefer metadata from the upcoming video, falling back to the one
        # that just finished.
        video = self.next if self.next else self.prev
        if not video.related:
            self.relatedListControl.reset()
            return False
        for rel in video.related()[0].items:
            mli = kodigui.ManagedListItem(rel.title or '', thumbnailImage=rel.thumb.asTranscodedImageURL(*self.RELATED_DIM), data_source=rel)
            if mli:
                mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/{0}.png'.format(rel.type in ('show', 'season', 'episode') and 'show' or 'movie'))
                mli.setProperty('index', str(idx))
                items.append(mli)
                idx += 1
        if not items:
            return False
        # Show a divider before this hub when a previous hub was rendered.
        self.setProperty('divider.{0}'.format(self.RELATED_LIST_ID), has_prev and '1' or '')
        self.relatedListControl.reset()
        self.relatedListControl.addItems(items)
        return True

    def fillRoles(self, has_prev=False):
        """Populate the cast hub; returns True if items were added."""
        items = []
        idx = 0
        video = self.next if self.next else self.prev
        if not video.roles:
            self.rolesListControl.reset()
            return False
        for role in video.roles():
            mli = kodigui.ManagedListItem(role.tag, role.role, thumbnailImage=role.thumb.asTranscodedImageURL(*self.ROLES_DIM), data_source=role)
            mli.setProperty('index', str(idx))
            items.append(mli)
            idx += 1
        if not items:
            return False
        self.setProperty('divider.{0}'.format(self.ROLES_LIST_ID), has_prev and '1' or '')
        self.rolesListControl.reset()
        self.rolesListControl.addItems(items)
        return True
    def playVideo(self, prev=False):
        """Play the next (or previous, when prev=True) video from post-play."""
        self.cancelTimer()
        try:
            if not self.next and self.playlist:
                # No explicit next item but a playlist exists: advance it.
                if prev:
                    self.playlist.prev()
                self.aborted = False
                self.playQueue = self.playlist
                self.video = None
                self.play(handler=self.handler)
            else:
                video = self.next
                if prev:
                    video = self.prev
                if not video:
                    util.DEBUG_LOG('Trying to play next video with no next video available')
                    self.video = None
                    return
                self.playQueue = None
                self.video = video
                self.play(handler=self.handler)
        except:
            util.ERROR()
def play(video=None, play_queue=None, resume=False):
    """Open the player window and return its exit command when it closes.

    VideoPlayerWindow.open() blocks until the window is closed; the player
    event handlers registered in onFirstInit are detached here afterwards.
    """
    w = VideoPlayerWindow.open(video=video, play_queue=play_queue, resume=resume)
    player.PLAYER.off('session.ended', w.sessionEnded)
    player.PLAYER.off('post.play', w.postPlay)
    player.PLAYER.off('change.background', w.changeBackground)
    command = w.exitCommand
    del w
    util.garbageCollect()
    return command
|
import os
import signal
import sys
import threading
from contextlib import contextmanager
from time import sleep as time_sleep
from typing import Any, Callable, Dict, Iterable, Iterator, Optional

import pendulum

import prefect
from prefect.client import Client
from prefect.core import Flow, Task
from prefect.engine.cloud import CloudTaskRunner
from prefect.engine.flow_runner import FlowRunner, FlowRunnerInitializeResult
from prefect.engine.runner import ENDRUN
from prefect.engine.state import Failed, Queued, State, Cancelling, Cancelled
from prefect.utilities.exceptions import VersionLockError
from prefect.utilities.graphql import with_args
class CloudFlowRunner(FlowRunner):
    """
    FlowRunners handle the execution of Flows and determine the State of a Flow
    before, during and after the Flow is run.

    In particular, through the FlowRunner you can specify which tasks should be
    the first tasks to run, which tasks should be returned after the Flow is finished,
    and what states each task should be initialized with.

    Args:
        - flow (Flow): the `Flow` to be run
        - state_handlers (Iterable[Callable], optional): A list of state change handlers
            that will be called whenever the flow changes state, providing an
            opportunity to inspect or modify the new state. The handler
            will be passed the flow runner instance, the old (prior) state, and the new
            (current) state, with the following signature:

            ```
            state_handler(
                flow_runner: FlowRunner,
                old_state: State,
                new_state: State) -> State
            ```

            If multiple functions are passed, then the `new_state` argument will be the
            result of the previous handler.

    Note: new FlowRunners are initialized within the call to `Flow.run()` and in general,
    this is the endpoint through which FlowRunners will be interacted with most frequently.

    Example:
    ```python
    @task
    def say_hello():
        print('hello')

    with Flow("My Flow") as f:
        say_hello()

    fr = FlowRunner(flow=f)
    flow_state = fr.run()
    ```
    """

    def __init__(self, flow: Flow, state_handlers: Iterable[Callable] = None) -> None:
        # All Cloud interactions (state updates, heartbeats, run info queries)
        # go through this client.
        self.client = Client()
        super().__init__(
            flow=flow, task_runner_cls=CloudTaskRunner, state_handlers=state_handlers
        )
def _heartbeat(self) -> bool:
try:
# use empty string for testing purposes
flow_run_id = prefect.context.get("flow_run_id", "") # type: str
self.client.update_flow_run_heartbeat(flow_run_id)
<<<<<<< HEAD
self.heartbeat_cmd = [
sys.executable,
"-m",
"prefect",
"heartbeat",
"flow-run",
"-i",
flow_run_id,
]
=======
self.heartbeat_cmd = ["prefect", "heartbeat", "flow-run", "-i", flow_run_id]
>>>>>>> prefect clone
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"flow": {"settings": True},
}
}
}
flow_run = self.client.graphql(query).data.flow_run_by_pk
if not flow_run.flow.settings.get("heartbeat_enabled", True):
return False
return True
except Exception:
self.logger.exception(
"Heartbeat failed for Flow '{}'".format(self.flow.name)
)
return False
    def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
        """
        A special state handler that the FlowRunner uses to call its flow's state handlers.
        This method is called as part of the base Runner's `handle_state_change()` method.

        Args:
            - old_state (State): the old (previous) state
            - new_state (State): the new (current) state

        Returns:
            - State: the new state
        """
        raise_on_exception = prefect.context.get("raise_on_exception", False)
        try:
            new_state = super().call_runner_target_handlers(
                old_state=old_state, new_state=new_state
            )
        except Exception as exc:
            msg = "Exception raised while calling state handlers: {}".format(repr(exc))
            self.logger.exception(msg)
            if raise_on_exception:
                raise exc
            new_state = Failed(msg, result=exc)
        flow_run_id = prefect.context.get("flow_run_id", None)
        version = prefect.context.get("flow_run_version")
        try:
            cloud_state = new_state
            # The version is only sent for running states; other transitions
            # bypass the version lock.
            state = self.client.set_flow_run_state(
                flow_run_id=flow_run_id,
                version=version if cloud_state.is_running() else None,
                state=cloud_state,
            )
        except VersionLockError as exc:
            # Another process updated the run first; refresh and reconcile.
            state = self.client.get_flow_run_state(flow_run_id=flow_run_id)
            if state.is_running():
                self.logger.debug(
                    "Version lock encountered and flow is already in a running state."
                )
                raise ENDRUN(state=state) from exc
            self.logger.debug(
                "Version lock encountered, proceeding with state {}...".format(
                    type(state).__name__
                )
            )
            new_state = state
        except Exception as exc:
            self.logger.exception(
                "Failed to set flow state with error: {}".format(repr(exc))
            )
            raise ENDRUN(state=new_state) from exc
        if state.is_queued():
            # Preserve the pre-queue state so the run can resume from it.
            state.state = old_state  # type: ignore
            raise ENDRUN(state=state)
        prefect.context.update(flow_run_version=(version or 0) + 1)
        return new_state
    @contextmanager
    def check_for_cancellation(self) -> Iterator:
        """Contextmanager used to wrap a cancellable section of a flow run.

        A daemon thread polls the backend for a Cancelling state and, if seen,
        interrupts the main thread so the wrapped section unwinds; the
        `finally` block then converts the interrupt into ENDRUN(Cancelled).
        """
        cancelling = False
        done = threading.Event()
        flow_run_version = None
        context = prefect.context.to_dict()

        def interrupt_if_cancelling() -> None:
            # We need to copy the context into this thread, since context is a
            # thread local.
            with prefect.context(context):
                flow_run_id = prefect.context["flow_run_id"]
                while True:
                    # wait() doubles as the poll interval and the exit signal.
                    exiting_context = done.wait(
                        prefect.config.cloud.check_cancellation_interval
                    )
                    try:
                        self.logger.debug("Checking flow run state...")
                        flow_run_info = self.client.get_flow_run_info(flow_run_id)
                    except Exception:
                        self.logger.warning(
                            "Error getting flow run info", exc_info=True
                        )
                        continue
                    if not flow_run_info.state.is_running():
                        self.logger.warning(
                            "Flow run is no longer in a running state; the current state is: %r",
                            flow_run_info.state,
                        )
                    if isinstance(flow_run_info.state, Cancelling):
                        self.logger.info(
                            "Flow run has been cancelled, cancelling active tasks"
                        )
                        nonlocal cancelling
                        nonlocal flow_run_version
                        cancelling = True
                        flow_run_version = flow_run_info.version
                        # If not already leaving context, raise KeyboardInterrupt in the main thread
                        if not exiting_context:
                            if os.name == "nt":
                                # This doesn't actually send a signal, so it will only
                                # interrupt the next Python bytecode instruction - if the
                                # main thread is blocked in a c extension the interrupt
                                # won't be seen until that returns.
                                from _thread import interrupt_main

                                interrupt_main()
                            else:
                                signal.pthread_kill(
                                    threading.main_thread().ident, signal.SIGINT  # type: ignore
                                )
                        break
                    elif exiting_context:
                        break

        thread = threading.Thread(target=interrupt_if_cancelling, daemon=True)
        thread.start()
        try:
            yield
        except KeyboardInterrupt:
            # Re-raise genuine user interrupts; swallow ours (handled below).
            if not cancelling:
                raise
        finally:
            done.set()
            thread.join()
            if cancelling:
                prefect.context.update(flow_run_version=flow_run_version)
                raise ENDRUN(state=Cancelled("Flow run is cancelled"))
def run(
self,
state: State = None,
task_states: Dict[Task, State] = None,
return_tasks: Iterable[Task] = None,
parameters: Dict[str, Any] = None,
task_runner_state_handlers: Iterable[Callable] = None,
<<<<<<< HEAD
executor: "prefect.executors.Executor" = None,
=======
executor: "prefect.engine.executors.Executor" = None,
>>>>>>> prefect clone
context: Dict[str, Any] = None,
task_contexts: Dict[Task, Dict[str, Any]] = None,
) -> State:
"""
The main endpoint for FlowRunners. Calling this method will perform all
computations contained within the Flow and return the final state of the Flow.
Args:
- state (State, optional): starting state for the Flow. Defaults to
`Pending`
- task_states (dict, optional): dictionary of task states to begin
computation with, with keys being Tasks and values their corresponding state
- return_tasks ([Task], optional): list of Tasks to include in the
final returned Flow state. Defaults to `None`
- parameters (dict, optional): dictionary of any needed Parameter
values, with keys being strings representing Parameter names and values being
their corresponding values
- task_runner_state_handlers (Iterable[Callable], optional): A list of state change
handlers that will be provided to the task_runner, and called whenever a task
changes state.
- executor (Executor, optional): executor to use when performing
computation; defaults to the executor specified in your prefect configuration
- context (Dict[str, Any], optional): prefect.Context to use for execution
to use for each Task run
- task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
provided to each task
Returns:
- State: `State` representing the final post-run state of the `Flow`.
"""
context = (context or {}).copy()
context.update(running_with_backend=True)
end_state = super().run(
state=state,
task_states=task_states,
return_tasks=return_tasks,
parameters=parameters,
task_runner_state_handlers=task_runner_state_handlers,
executor=executor,
context=context,
task_contexts=task_contexts,
)
# If start time is more than 10 minutes in the future,
# we fail the run so Lazarus can pick it up and reschedule it.
while end_state.is_queued() and (
end_state.start_time <= pendulum.now("utc").add(minutes=10) # type: ignore
):
assert isinstance(end_state, Queued)
time_remaining = max(
(end_state.start_time - pendulum.now("utc")).total_seconds(), 0
)
self.logger.debug(
(
f"Flow run is in a Queued state. Sleeping for at most {time_remaining:.2f} "
f"seconds and attempting to run again."
)
)
# Sleep until not in a queued state, then attempt to re-run
while time_remaining > 0:
delay = min(
prefect.config.cloud.check_cancellation_interval, time_remaining
)
time_remaining -= delay
# Imported `time.sleep` as `time_sleep` to allow monkeypatching in tests
time_sleep(delay)
flow_run_info = self.client.get_flow_run_info(
flow_run_id=prefect.context.get("flow_run_id")
)
context.update(flow_run_version=flow_run_info.version)
if not isinstance(flow_run_info.state, Queued):
break
# When concurrency slots become free, this will eventually result
# in a non queued state, but will result in more or less just waiting
# until the orchestration layer says we are clear to go. Purposefully
# not passing `state` so we can refresh the info from cloud,
# allowing us to prematurely bail out of flow runs that have already
# reached a finished state via another process.
end_state = super().run(
task_states=task_states,
return_tasks=return_tasks,
parameters=parameters,
task_runner_state_handlers=task_runner_state_handlers,
executor=executor,
context=context,
task_contexts=task_contexts,
)
return end_state
    def initialize_run(  # type: ignore
        self,
        state: Optional[State],
        task_states: Dict[Task, State],
        context: Dict[str, Any],
        task_contexts: Dict[Task, Dict[str, Any]],
        parameters: Dict[str, Any],
    ) -> FlowRunnerInitializeResult:
        """
        Initializes the Task run by initializing state and context appropriately.

        If the provided state is a Submitted state, the state it wraps is extracted.

        Args:
            - state (Optional[State]): the initial state of the run
            - task_states (Dict[Task, State]): a dictionary of any initial task states
            - context (Dict[str, Any], optional): prefect.Context to use for execution
                to use for each Task run
            - task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
                provided to each task
            - parameters(dict): the parameter values for the run

        Returns:
            - NamedTuple: a tuple of initialized objects:
                `(state, task_states, context, task_contexts)`
        """
        # load id from context
        flow_run_id = prefect.context.get("flow_run_id")
        try:
            flow_run_info = self.client.get_flow_run_info(flow_run_id)
        except Exception as exc:
            self.logger.debug(
                "Failed to retrieve flow state with error: {}".format(repr(exc))
            )
            if state is None:
                state = Failed(
                    message="Could not retrieve state from Prefect Cloud", result=exc
                )
            raise ENDRUN(state=state) from exc
        # Cloud-provided context is layered over the caller's, then pinned
        # with the authoritative run identifiers.
        updated_context = context or {}
        updated_context.update(flow_run_info.context or {})
        updated_context.update(
            flow_id=flow_run_info.flow_id,
            flow_run_id=flow_run_info.id,
            flow_run_version=flow_run_info.version,
            flow_run_name=flow_run_info.name,
            scheduled_start_time=flow_run_info.scheduled_start_time,
        )
        # Map task slugs back to Task objects for this flow.
        tasks = {slug: t for t, slug in self.flow.slugs.items()}
        # update task states and contexts
        for task_run in flow_run_info.task_runs:
            try:
                task = tasks[task_run.task_slug]
            except KeyError as exc:
                msg = (
                    f"Task slug {task_run.task_slug} not found in the current Flow; "
                    f"this is usually caused by changing the Flow without reregistering "
                    f"it with the Prefect API."
                )
                raise KeyError(msg) from exc
            # setdefault: never clobber states/contexts supplied by the caller.
            task_states.setdefault(task, task_run.state)
            task_contexts.setdefault(task, {}).update(
                task_id=task_run.task_id,
                task_run_id=task_run.id,
                task_run_version=task_run.version,
            )
        # if state is set, keep it; otherwise load from Cloud
        state = state or flow_run_info.state  # type: ignore
        # update parameters, prioritizing kwarg-provided params
        updated_parameters = flow_run_info.parameters or {}  # type: ignore
        updated_parameters.update(parameters)
        return super().initialize_run(
            state=state,
            task_states=task_states,
            context=updated_context,
            task_contexts=task_contexts,
            parameters=updated_parameters,
        )
|
test_lsl_node.py | from pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_stream
import pytest
import asyncio
import time
from multiprocessing import Process
from typing import Sequence
from labgraph.graphs import (
AsyncPublisher,
Config,
Connections,
Graph,
Module,
Node,
Topic,
publisher,
subscriber
)
from labgraph.runners import LocalRunner, NormalTermination, ParallelRunner
from labgraph.util.logger import get_logger
from labgraph.util.testing import get_test_filename, local_test
from labgraph.devices.protocols.lsl import LSLMessage
from labgraph.devices.protocols.lsl.lsl_poller_node import (
LSLPollerConfig,
LSLPollerNode
)
from labgraph.devices.protocols.lsl.lsl_sender_node import (
LSLSenderConfig,
LSLSenderNode
)
NUM_MESSAGES = 10
SAMPLE_RATE = 10
logger = get_logger(__name__)
DATA_DELIMITER = "\n"
samples = [0.1, 1.1, 2.3, 4.4, 5.5, 6.6, 7.5, 7.7, 8.8, 9.9]
class MySinkConfig(Config):
output_filename: str = get_test_filename()
class MySink(Node):
    """
    Convenience node for receiving messages from a `LSLSenderNode`.

    Writes every value of each received message to the configured output
    file (one value per line) and raises NormalTermination once the
    message counter reaches NUM_MESSAGES.
    """
    TOPIC = Topic(LSLMessage)
    config: MySinkConfig
    def setup(self) -> None:
        self.output_file = open(self.config.output_filename, "w+")
        # NOTE(review): counter starts at 1, so termination fires after
        # NUM_MESSAGES - 1 messages have actually been written — confirm
        # this off-by-one is intentional.
        self.num_received = 1
    @subscriber(TOPIC)
    async def sink(self, message: LSLMessage) -> None:
        # Persist each sample value on its own line.
        for value in message.data:
            value_str = str(value)
            self.output_file.write(value_str)
            self.output_file.write(DATA_DELIMITER)
        self.num_received += 1
        if self.num_received == NUM_MESSAGES:
            raise NormalTermination()
    def cleanup(self) -> None:
        self.output_file.close()
class MySourceConfig(Config):
    # When True, MySource raises NormalTermination after its publish/sleep
    # cycle (see MySource.source).
    should_terminate: bool = True
class MySource(Node):
    """
    Convenience node for supplying messages to a LSLSenderNode

    Publishes the fixed module-level `samples` payload, waits one sample
    period (1/SAMPLE_RATE s), then terminates if configured to do so.
    """
    TOPIC = Topic(LSLMessage)
    config: MySourceConfig
    @publisher(TOPIC)
    async def source(self) -> AsyncPublisher:
        yield self.TOPIC, LSLMessage(samples)
        await asyncio.sleep(1/SAMPLE_RATE)
        if self.config.should_terminate:
            raise NormalTermination()
# send messages
def write_sample_to_lsl() -> None:
    """Publish the fixed `samples` payload to a mock LSL outlet.

    Opens a 10-channel float32 stream named 'Mock_Signal' of type
    'mock_type' and pushes the same sample vector 300 times at roughly
    100 Hz. Intended to run in a separate process while a poller test
    consumes the stream.
    """
    sample_rate = 100
    name = 'Mock_Signal'
    # Renamed from `type` / `iter` so the builtins are not shadowed.
    stream_type = 'mock_type'
    n_channels = 10
    info = StreamInfo(name, stream_type, n_channels, sample_rate,
                      'float32', 'myuid34234')
    outlet = StreamOutlet(info)
    logger.log(1, "now sending data...")
    max_iter = 300
    # range loop replaces the manual while-counter.
    for _ in range(max_iter):
        outlet.push_sample(samples)
        time.sleep(0.01)
def recv_samples_from_lsl(output_fname: str) -> None:
    """Pull one sample from the first 'mock_type' LSL stream and write
    each of its values to *output_fname*, one per line.

    Blocks until a matching stream is resolved and a sample arrives.
    Intended to run in a separate process while a sender test publishes.
    """
    streams = resolve_stream('type', 'mock_type')
    inlet = StreamInlet(streams[0])
    with open(output_fname, "w+") as output_file:
        sample, timestamp = inlet.pull_sample()
        for value in sample:
            value_str = str(value)
            output_file.write(value_str)
            output_file.write(DATA_DELIMITER)
        # Fix: the `with` block already closes the file; the explicit
        # close() call that used to sit here was redundant.
@local_test
def test_lsl_poller_node() -> None:
    """LSLPollerNode should receive every distinct sample value that an
    external process pushes onto a matching LSL stream."""
    class LSLPollerGraphConfig(Config):
        output_filename: str
    class LSLPollerGraph(Graph):
        MY_SOURCE: LSLPollerNode
        MY_SINK: MySink
        config: LSLPollerGraphConfig
        def setup(self) -> None:
            self.MY_SOURCE.configure(LSLPollerConfig(type='mock_type'))
            self.MY_SINK.configure(
                MySinkConfig(output_filename=self.config.output_filename)
            )
        def connections(self) -> Connections:
            return ((self.MY_SOURCE.topic, self.MY_SINK.TOPIC),)
    out_path = get_test_filename()
    graph = LSLPollerGraph()
    graph.configure(LSLPollerGraphConfig(output_filename=out_path))
    runner = LocalRunner(module=graph)
    # Push samples from a separate process while the graph polls.
    writer_proc = Process(target=write_sample_to_lsl, args=())
    writer_proc.start()
    runner.run()
    writer_proc.join()
    with open(out_path, "r") as f:
        contents = f.read()
    received = set(contents.strip(DATA_DELIMITER).split(DATA_DELIMITER))
    assert len(received) > 0
    assert len(samples) == len(received)
@local_test
def test_lsl_sender_node() -> None:
    """LSLSenderNode should forward the values published by MySource to
    an external LSL inlet running in a separate process."""
    class LSLSenderGraph(Graph):
        MY_SOURCE: MySource
        MY_SINK: LSLSenderNode
        config: LSLSenderConfig
        def setup(self) -> None:
            self.MY_SOURCE.configure(MySourceConfig())
            self.MY_SINK.configure(self.config)
        def connections(self) -> Connections:
            return ((self.MY_SOURCE.TOPIC, self.MY_SINK.topic),)
    out_path = get_test_filename()
    sender_config = LSLSenderConfig(
        stream_name='Test',
        stream_type='mock_type',
        n_channels=NUM_MESSAGES,
        unique_identifier='12345QE'
    )
    graph = LSLSenderGraph()
    graph.configure(sender_config)
    runner = LocalRunner(module=graph)
    # Consume the stream from a separate process while the graph sends.
    reader_proc = Process(target=recv_samples_from_lsl, args=(out_path,))
    reader_proc.start()
    runner.run()
    reader_proc.join()
    with open(out_path, "r") as f:
        contents = f.read()
    received = set(contents.strip(DATA_DELIMITER).split(DATA_DELIMITER))
    assert len(received) > 0
    assert len(samples) == len(received)
@local_test
def test_lsl_send_and_poll() -> None:
    """End-to-end test: MySource -> LSLSenderNode -> (LSL transport) ->
    LSLPollerNode -> MySink, run as separate process modules."""
    class LSLGraphConfig(Config):
        output_filename: str
    class LSLGraph(Graph):
        DF_SOURCE: MySource
        LSL_SENDER: LSLSenderNode
        LSL_POLLER: LSLPollerNode
        DF_SINK: MySink
        # Fix: this declaration was missing even though setup() reads
        # self.config.output_filename and the test configures the graph
        # with an LSLGraphConfig; the sibling test graphs all declare it.
        config: LSLGraphConfig
        def setup(self) -> None:
            self.DF_SOURCE.configure(MySourceConfig(should_terminate=True))
            self.LSL_SENDER.configure(
                LSLSenderConfig(
                    stream_name='mock name',
                    stream_type='mock type',
                    n_channels=NUM_MESSAGES,
                    unique_identifier='12345QE'
                )
            )
            self.LSL_POLLER.configure(
                LSLPollerConfig(
                    type='mock type'
                )
            )
            self.DF_SINK.configure(
                MySinkConfig(output_filename=self.config.output_filename)
            )
        def connections(self) -> Connections:
            return (
                (self.DF_SOURCE.TOPIC, self.LSL_SENDER.topic),
                (self.LSL_POLLER.topic, self.DF_SINK.TOPIC)
            )
        def process_modules(self) -> Sequence[Module]:
            return (self.DF_SOURCE, self.LSL_SENDER, self.LSL_POLLER,
                    self.DF_SINK)
    output_filename = get_test_filename()
    graph = LSLGraph()
    graph.configure(
        LSLGraphConfig(
            output_filename=output_filename
        )
    )
    runner = ParallelRunner(graph=graph)
    runner.run()
    with open(output_filename, "r") as f:
        data = f.read()
    received_data = set(data.strip(DATA_DELIMITER).split(DATA_DELIMITER))
    assert len(received_data) == NUM_MESSAGES
    assert len(samples) == len(received_data)
|
NozomiMain.py | import traceback
import requests
import wx
import re
import webbrowser
from wxglade_out import NozomiDownloader
from threading import Thread
from pathlib import Path
from nozomi import api
class NozomiDownloaderBody(NozomiDownloader):
    """Behavior for the Nozomi downloader window.

    Binds the GUI events and runs the download loop on a background
    thread so the wx event loop stays responsive.
    """
    # Splits comma-separated tag lists, tolerating whitespace around commas.
    # Fix: raw string avoids the invalid-escape-sequence warning for '\s',
    # and compiling once replaces the duplicated inline patterns that
    # tagChanged()/startDownload() each rebuilt per call.
    _TAG_SPLITTER = re.compile(r'\s*,\s*')
    def __init__(self, *args, **kw):
        self.downloading = False
        super(NozomiDownloaderBody, self).__init__(*args, **kw)
        self.btnToggleStart.Bind(wx.EVT_BUTTON, self.startButtonClick)
        self.tbPositiveTags.Bind(wx.EVT_TEXT, self.tagChanged)
        self.tbNegativeTags.Bind(wx.EVT_TEXT, self.tagChanged)
        self.btnYoutube.Bind(wx.EVT_BUTTON, self.youtube)
        self.enableControls()
    def disableControls(self):
        """Grey out and lock the tag/directory inputs while downloading."""
        self.tbPositiveTags.SetBackgroundColour(wx.LIGHT_GREY)
        self.tbNegativeTags.SetBackgroundColour(wx.LIGHT_GREY)
        self.tbDirectoryName.SetBackgroundColour(wx.LIGHT_GREY)
        self.tbPositiveTags.SetEditable(False)
        self.tbNegativeTags.SetEditable(False)
        self.tbDirectoryName.SetEditable(False)
    def enableControls(self):
        """Unlock the tag/directory inputs when no download is running."""
        self.tbPositiveTags.SetBackgroundColour(wx.WHITE)
        self.tbNegativeTags.SetBackgroundColour(wx.WHITE)
        self.tbDirectoryName.SetBackgroundColour(wx.WHITE)
        self.tbPositiveTags.SetEditable(True)
        self.tbNegativeTags.SetEditable(True)
        self.tbDirectoryName.SetEditable(True)
    def youtube(self, event):
        """Open the author's YouTube channel in the default browser."""
        webbrowser.open('https://youtube.com/c/devlala')
    def startButtonClick(self, event):
        """Toggle between starting and stopping the download."""
        if not self.tbPositiveTags.GetLineText(0):
            wx.MessageBox("태그를 입력하세요")
            return
        if self.downloading:
            self.btnToggleStart.SetLabelText('Start Download')
            self.downloading = False
            self.print("Download Stopped")
            self.enableControls()
        else:
            self.btnToggleStart.SetLabelText('Stop Download')
            self.downloading = True
            self.startDownload()
            self.disableControls()
    def tagChanged(self, event: wx.CommandEvent):
        """Derive the download directory name from the current tag fields."""
        p = self.tbPositiveTags.GetLineText(0)
        n = self.tbNegativeTags.GetLineText(0)
        if (not p) and (not n):
            self.tbDirectoryName.SetLabelText("Downloads")
        else:
            positive_tags = self._TAG_SPLITTER.split(p)
            directoryName = ','.join(positive_tags)
            if len(n) > 0:
                negative_tags = self._TAG_SPLITTER.split(n)
                directoryName = directoryName + ' Not ' + ','.join(negative_tags)
            self.tbDirectoryName.SetLabelText(directoryName.strip())
    def startDownload(self):
        """Spawn the worker thread that performs the actual download."""
        p = self.tbPositiveTags.GetLineText(0)
        n = self.tbNegativeTags.GetLineText(0)
        positive_tags = self._TAG_SPLITTER.split(p) if p else None
        negative_tags = self._TAG_SPLITTER.split(n) if n else None
        downloadThread = Thread(target=self.run, args=(self.tbDirectoryName.GetLineText(0), positive_tags, negative_tags))
        downloadThread.start()
    def run(self, directory, positive_tags, negative_tags=None):
        """Worker-thread body: fetch matching posts and download each file,
        retrying transient I/O failures a few times per file.

        NOTE(review): this runs off the wx main thread yet updates widgets
        directly; wxPython expects GUI calls on the main thread — consider
        routing widget updates through wx.CallAfter.
        """
        self.print("Download Start")
        try:
            for post in api.get_posts(positive_tags, negative_tags):
                if not self.downloading:
                    return  # user pressed Stop
                self.print("Post-width:" + str(post.width))
                self.print("Post-height:" + str(post.height))
                self.print("Post-id:" + str(post.sourceid))
                retry = 3
                while True:
                    try:
                        result = api.download_media(post, Path(Path.cwd(), directory))
                        if result: self.print(post.imageurl + ": Download Success")
                        else: self.print(post.imageurl + ": File Already Exists")
                        break
                    except Exception as e:
                        if retry == 0: break
                        self.print(post.imageurl + ": IO Error (Read Timeout... or something) retry " + str(retry))
                        retry = retry - 1
            wx.MessageBox("다운로드 완료")
            self.downloading = False
            self.btnToggleStart.SetLabelText('Start Download')
            self.enableControls()
        except Exception as e:
            traceback.print_exc()
            wx.MessageBox("태그검색결과 오류")
            self.downloading = False
            self.btnToggleStart.SetLabelText('Start Download')
            self.enableControls()
    def print(self, string):
        """Append a line to the console text box."""
        if self.tbConsole.GetLineText(0):
            self.tbConsole.AppendText("\n")
        self.tbConsole.AppendText(string)
def main():
    """Create the wx application, show the downloader window, and block
    in the GUI event loop until the window closes."""
    app = wx.App()
    window = NozomiDownloaderBody(None)
    window.Show()
    app.MainLoop()
if __name__ == '__main__':
main()
|
app_mt.py | '''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import os
import pathlib
import xir
import threading
import time
import sys
import argparse
def preprocess_fn(image_path):
    '''
    Image pre-processing.
    Rearranges from BGR to RGB then normalizes to range 0:1
    input arg: path of image file
    return: numpy array
    '''
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb / 255.0
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
    """Return the DPU-mapped child subgraphs of *graph* in topological order."""
    assert graph is not None, "'graph' should not be None."
    root_subgraph = graph.get_root_subgraph()
    assert (root_subgraph is not None), "Failed to get root subgraph of input Graph object."
    if root_subgraph.is_leaf:
        return []
    child_subgraphs = root_subgraph.toposort_child_subgraph()
    assert child_subgraphs is not None and len(child_subgraphs) > 0
    # Keep only the subgraphs tagged for the DPU device.
    dpu_subgraphs = []
    for cs in child_subgraphs:
        if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU":
            dpu_subgraphs.append(cs)
    return dpu_subgraphs
def runDPU(id,start,dpu,img):
    """Worker-thread body: push `img` through the DPU in batches and
    record the argmax class index for each image in the global `out_q`.

    id: thread index (unused in the body)
    start: first index of `out_q` owned by this worker
    dpu: vart.Runner bound to the DPU subgraph
    img: list of pre-processed images for this worker
    """
    '''get tensor'''
    inputTensors = dpu.get_input_tensors()
    outputTensors = dpu.get_output_tensors()
    input_ndim = tuple(inputTensors[0].dims)
    output_ndim = tuple(outputTensors[0].dims)
    # Leading dimension of the input tensor is the hardware batch size.
    batchSize = input_ndim[0]
    n_of_images = len(img)
    count = 0
    write_index = start
    while count < n_of_images:
        # Last batch may be partial.
        if (count+batchSize<=n_of_images):
            runSize = batchSize
        else:
            runSize=n_of_images-count
        '''prepare batch input/output '''
        outputData = []
        inputData = []
        inputData = [np.empty(input_ndim, dtype=np.float32, order="C")]
        outputData = [np.empty(output_ndim, dtype=np.float32, order="C")]
        '''init input image to input buffer '''
        for j in range(runSize):
            imageRun = inputData[0]
            imageRun[j, ...] = img[(count + j) % n_of_images].reshape(input_ndim[1:])
        '''run with batch '''
        job_id = dpu.execute_async(inputData,outputData)
        dpu.wait(job_id)
        '''store output vectors '''
        for j in range(runSize):
            # Predicted class = argmax over the output vector.
            out_q[write_index] = np.argmax((outputData[0][j]))
            write_index += 1
        count = count + runSize
def app(image_dir,threads,model):
    """Classify every image in *image_dir* with the given xmodel using
    `threads` parallel DPU runners, then report throughput and accuracy.

    Ground truth is taken from the portion of each filename before the
    first underscore (CIFAR-10 class names).
    """
    listimage=os.listdir(image_dir)
    runTotal = len(listimage)
    # Shared result buffer that the runDPU worker threads write into.
    global out_q
    out_q = [None] * runTotal
    g = xir.Graph.deserialize(model)
    subgraphs = get_child_subgraph_dpu(g)
    all_dpu_runners = []
    for i in range(threads):
        all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
    ''' preprocess images '''
    print('Pre-processing',runTotal,'images...')
    img = []
    for i in range(runTotal):
        path = os.path.join(image_dir,listimage[i])
        img.append(preprocess_fn(path))
    '''run threads '''
    print('Starting',threads,'threads...')
    threadAll = []
    start=0
    # Split the image list into roughly equal contiguous chunks; the last
    # thread takes any remainder.
    for i in range(threads):
        if (i==threads-1):
            end = len(img)
        else:
            end = start+(len(img)//threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
        threadAll.append(t1)
        start=end
    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1
    fps = float(runTotal / timetotal)
    print("Throughput=%.2f fps, total frames = %.0f, time=%.4f seconds" %(fps, runTotal, timetotal))
    ''' post-processing '''
    classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
    correct = 0
    wrong = 0
    print('output buffer length:',len(out_q))
    for i in range(len(out_q)):
        prediction = classes[out_q[i]]
        # Filenames are expected to look like '<class>_<anything>'.
        ground_truth, _ = listimage[i].split('_',1)
        if (ground_truth==prediction):
            correct += 1
        else:
            wrong += 1
    accuracy = correct/len(out_q)
    print('Correct:%d, Wrong:%d, Accuracy:%.4f' %(correct,wrong,accuracy))
    return
# only used if script is run as 'main' from command line
def main():
    """Parse the command-line options and launch the multi-threaded app."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--image_dir', type=str, default='images', help='Path to folder of images. Default is images')
    parser.add_argument('-t', '--threads', type=int, default=1, help='Number of threads. Default is 1')
    parser.add_argument('-m', '--model', type=str, default='model_dir/densenetx.xmodel', help='Path of xmodel. Default is model_dir/densenetx.xmodel')
    args = parser.parse_args()
    print ('Command line options:')
    print (' --image_dir : ', args.image_dir)
    print (' --threads   : ', args.threads)
    print (' --model     : ', args.model)
    app(args.image_dir, args.threads, args.model)
if __name__ == '__main__':
main()
|
build_graph_manually.py | import sys
import time
from threading import Thread
import cv2
from pynput.keyboard import Key, Listener
from algorithms.utils.algo_utils import main_observation
from algorithms.utils.env_wrappers import reset_with_info
from algorithms.tmax.agent_tmax import AgentTMAX
from algorithms.tmax.tmax_utils import parse_args_tmax
from algorithms.topological_maps.topological_map import TopologicalMap, get_position, get_angle
from utils.envs.atari import atari_utils
from utils.envs.doom import doom_utils
from utils.envs.envs import create_env
from utils.envs.generate_env_map import generate_env_map
from utils.utils import log, model_dir
# Shared state between the pynput listener thread and the main env loop.
add_landmark = False     # set on Enter; consumed (and reset) by build_graph()
terminate = False        # set on Esc; stops the episode loop
current_actions = []     # currently held-down action keys; the last one wins
key_to_action = None     # env-specific key mapping, assigned in main()
# noinspection PyCallingNonCallable
def on_press(key):
    """Keyboard-listener callback: record pressed action keys and flags."""
    global terminate, add_landmark
    if key == Key.esc:
        terminate = True
        # Returning False stops the pynput listener.
        return False
    action = key_to_action(key)
    if action is not None and action not in current_actions:
        current_actions.append(action)
    if key == Key.enter:
        add_landmark = True
# noinspection PyCallingNonCallable
def on_release(key):
    """Keyboard-listener callback: un-track a released action key."""
    action = key_to_action(key)
    if action is not None and action in current_actions:
        current_actions.remove(action)
def build_graph(params, env_id, max_num_episodes=1000):
    """Interactively drive the environment with the keyboard and build a
    topological map: Enter adds the current observation as a landmark,
    Esc terminates and saves. Returns 0 on normal exit.
    """
    def make_env_func():
        # Fixed seed so the manual session is reproducible.
        e = create_env(env_id, mode='test', skip_frames=False)
        e.seed(0)
        return e
    checkpoint_dir = model_dir(params.experiment_dir())
    map_img, coord_limits = generate_env_map(make_env_func)
    env = make_env_func()
    m = None
    for _ in range(max_num_episodes):
        env_obs, info = reset_with_info(env)
        obs = main_observation(env_obs)
        done = False
        if m is None:
            # Lazily create the map from the first observation, then try
            # to resume from an existing checkpoint.
            m = TopologicalMap(obs, directed_graph=False, initial_info=info, verbose=True)
            m.maybe_load_checkpoint(checkpoint_dir)
        while not done and not terminate:
            env.render()
            # Last pressed key wins; 0 is the idle/no-op action.
            if len(current_actions) > 0:
                action = current_actions[-1]
            else:
                action = 0
            env_obs, rew, done, info = env.step(action)
            obs = main_observation(env_obs)
            global add_landmark
            if add_landmark:
                # noinspection PyProtectedMember
                new_idx = m._add_new_node(obs=obs, pos=get_position(info), angle=get_angle(info))
                log.info('Added landmark idx %d', new_idx)
                add_landmark = False
                # Persist and visualize the updated graph immediately.
                res = m.save_checkpoint(checkpoint_dir, map_img=map_img, coord_limits=coord_limits, verbose=True)
                cv2.imshow('map', cv2.imread(res.graph_filename))
                cv2.waitKey(50)
        if terminate:
            break
        else:
            env.render()
            time.sleep(0.2)
    # Save once, pause for optional manual graph edits under a debugger,
    # then save the final state.
    m.save_checkpoint(checkpoint_dir, map_img=map_img, coord_limits=coord_limits, verbose=True)
    log.debug('Set breakpoint here to edit graph edges before saving...')
    log.info('Saving to %s...', checkpoint_dir)
    m.save_checkpoint(checkpoint_dir, map_img=map_img, coord_limits=coord_limits, verbose=True)
    env.close()
    return 0
def main():
    """Entry point: pick the key mapping for the chosen env, start the
    keyboard-listener thread, and run the interactive graph builder."""
    args, params = parse_args_tmax(AgentTMAX.Params)
    env_id = args.env
    global key_to_action
    if 'dmlab' in env_id:
        from utils.envs.dmlab import play_dmlab
        key_to_action = play_dmlab.key_to_action
    elif 'atari' in env_id:
        key_to_action = atari_utils.key_to_action
    elif 'doom' in env_id:
        key_to_action = doom_utils.key_to_action
    else:
        raise Exception('Unknown env')
    # start keypress listener (to pause/resume execution or exit)
    def start_listener():
        with Listener(on_press=on_press, on_release=on_release) as listener:
            listener.join()
    listener_thread = Thread(target=start_listener)
    listener_thread.start()
    status = build_graph(params, args.env)
    if not terminate:
        log.debug('Press ESC to exit...')
    # The listener stops once Esc is pressed (on_press returns False).
    listener_thread.join()
    return status
if __name__ == '__main__':
sys.exit(main())
|
TestController.py | import collections
import os
import random
import threading
import collections
import shutil
import sys
import tempfile
import time
import traceback
import unittest
import wx
from . import HydrusConstants as HC
from . import ClientConstants as CC
from . import HydrusGlobals as HG
from . import ClientAPI
from . import ClientDefaults
from . import ClientNetworking
from . import ClientNetworkingBandwidth
from . import ClientNetworkingDomain
from . import ClientNetworkingLogin
from . import ClientNetworkingSessions
from . import ClientServices
from . import ClientThreading
from . import HydrusExceptions
from . import HydrusPubSub
from . import HydrusSessions
from . import HydrusTags
from . import HydrusThreading
from . import TestClientAPI
from . import TestClientConstants
from . import TestClientDaemons
from . import TestClientData
from . import TestClientImageHandling
from . import TestClientImportOptions
from . import TestClientImportSubscriptions
from . import TestClientListBoxes
from . import TestClientNetworking
from . import TestClientThreading
from . import TestDialogs
from . import TestDB
from . import TestFunctions
from . import TestHydrusNATPunch
from . import TestHydrusNetworking
from . import TestHydrusSerialisable
from . import TestHydrusServer
from . import TestHydrusSessions
from . import TestHydrusTags
from twisted.internet import reactor
from . import ClientCaches
from . import ClientData
from . import ClientOptions
from . import HydrusData
from . import HydrusPaths
DB_DIR = None  # set by Controller.__init__ to its per-run temp db directory
# Smallest valid GIF; used as a tiny file fixture in tests.
tiniest_gif = b'\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\xFF\x00\x2C\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x00\x3B'
# Stable service keys for the example rating services registered below.
LOCAL_RATING_LIKE_SERVICE_KEY = HydrusData.GenerateKey()
LOCAL_RATING_NUMERICAL_SERVICE_KEY = HydrusData.GenerateKey()
def ConvertServiceKeysToContentUpdatesToComparable( service_keys_to_content_updates ):
    """Return a copy of the mapping with each content-update list collapsed
    to a set, so two mappings compare order-insensitively."""
    return {
        service_key : set( content_updates )
        for ( service_key, content_updates ) in service_keys_to_content_updates.items()
    }
class MockController( object ):
    """Lightweight stand-in for the client controller.

    Holds fresh options locally and forwards threading and shutdown
    queries to the global HG.test_controller; pubsub is a no-op.
    """
    def __init__( self ):
        self.model_is_shutdown = False
        self.new_options = ClientOptions.ClientOptions()
    def CallToThread( self, callable, *args, **kwargs ):
        # Forward to the real test controller's worker pool.
        return HG.test_controller.CallToThread( callable, *args, **kwargs )
    def JustWokeFromSleep( self ):
        return False
    def ModelIsShutdown( self ):
        # Shut down when either this mock or the global controller says so.
        if self.model_is_shutdown:
            return True
        return HG.test_controller.ModelIsShutdown()
    def pub( self, *args, **kwargs ):
        pass
    def sub( self, *args, **kwargs ):
        pass
class MockServicesManager( object ):
    """Dict-backed stub of the client services manager."""
    def __init__( self, services ):
        # Index the provided services by their service key.
        self._service_keys_to_services = {}
        for service in services:
            self._service_keys_to_services[ service.GetServiceKey() ] = service
    def GetName( self, service_key ):
        return self.GetService( service_key ).GetName()
    def GetService( self, service_key ):
        return self._service_keys_to_services[ service_key ]
    def ServiceExists( self, service_key ):
        return service_key in self._service_keys_to_services
class FakeWebSessionManager():
    """Stub web-session manager: every session counts as logged in and a
    fixed cookie jar is returned regardless of arguments."""
    def EnsureLoggedIn( self, name ):
        pass
    def GetCookies( self, *args, **kwargs ):
        cookies = { 'session_cookie' : 'blah' }
        return cookies
class TestFrame( wx.Frame ):
    """Bare top-level frame that sizes itself around a single panel."""
    def __init__( self ):
        wx.Frame.__init__( self, None )
    def SetPanel( self, panel ):
        # Let the panel fill the frame in both directions, then show it.
        sizer = wx.BoxSizer( wx.VERTICAL )
        sizer.Add( panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        self.SetSizer( sizer )
        self.Fit()
        self.Show()
only_run = None  # optionally restricts Controller.Run to a single suite name
class Controller( object ):
def __init__( self, win, only_run ):
self.win = win
self.only_run = only_run
self.db_dir = tempfile.mkdtemp()
global DB_DIR
DB_DIR = self.db_dir
self._server_files_dir = os.path.join( self.db_dir, 'server_files' )
self._updates_dir = os.path.join( self.db_dir, 'test_updates' )
client_files_default = os.path.join( self.db_dir, 'client_files' )
HydrusPaths.MakeSureDirectoryExists( self._server_files_dir )
HydrusPaths.MakeSureDirectoryExists( self._updates_dir )
HydrusPaths.MakeSureDirectoryExists( client_files_default )
HG.controller = self
HG.client_controller = self
HG.server_controller = self
HG.test_controller = self
self.gui = self
self._call_to_threads = []
self._pubsub = HydrusPubSub.HydrusPubSub( self )
self.new_options = ClientOptions.ClientOptions()
HC.options = ClientDefaults.GetClientDefaultOptions()
self.options = HC.options
def show_text( text ): pass
HydrusData.ShowText = show_text
self._reads = {}
self._reads[ 'local_booru_share_keys' ] = []
self._reads[ 'messaging_sessions' ] = []
self._reads[ 'tag_censorship' ] = []
self._reads[ 'options' ] = ClientDefaults.GetClientDefaultOptions()
self._reads[ 'file_system_predicates' ] = []
self._reads[ 'media_results' ] = []
self.example_tag_repo_service_key = HydrusData.GenerateKey()
services = []
services.append( ClientServices.GenerateService( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, 'local booru' ) )
services.append( ClientServices.GenerateService( CC.CLIENT_API_SERVICE_KEY, HC.CLIENT_API_SERVICE, 'client api' ) )
services.append( ClientServices.GenerateService( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HC.COMBINED_LOCAL_FILE, 'all local files' ) )
services.append( ClientServices.GenerateService( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'my files' ) )
services.append( ClientServices.GenerateService( CC.TRASH_SERVICE_KEY, HC.LOCAL_FILE_TRASH_DOMAIN, 'trash' ) )
services.append( ClientServices.GenerateService( CC.LOCAL_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'local tags' ) )
services.append( ClientServices.GenerateService( self.example_tag_repo_service_key, HC.TAG_REPOSITORY, 'example tag repo' ) )
services.append( ClientServices.GenerateService( CC.COMBINED_TAG_SERVICE_KEY, HC.COMBINED_TAG, 'all known tags' ) )
services.append( ClientServices.GenerateService( LOCAL_RATING_LIKE_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'example local rating like service' ) )
services.append( ClientServices.GenerateService( LOCAL_RATING_NUMERICAL_SERVICE_KEY, HC.LOCAL_RATING_NUMERICAL, 'example local rating numerical service' ) )
self._reads[ 'services' ] = services
client_files_locations = {}
for prefix in HydrusData.IterateHexPrefixes():
for c in ( 'f', 't' ):
client_files_locations[ c + prefix ] = client_files_default
self._reads[ 'client_files_locations' ] = client_files_locations
self._reads[ 'sessions' ] = []
self._reads[ 'tag_parents' ] = {}
self._reads[ 'tag_siblings' ] = {}
self._reads[ 'in_inbox' ] = False
self._writes = collections.defaultdict( list )
self._managers = {}
self.services_manager = ClientCaches.ServicesManager( self )
self.client_files_manager = ClientCaches.ClientFilesManager( self )
self.parsing_cache = ClientCaches.ParsingCache()
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
session_manager = ClientNetworkingSessions.NetworkSessionManager()
domain_manager = ClientNetworkingDomain.NetworkDomainManager()
ClientDefaults.SetDefaultDomainManagerData( domain_manager )
login_manager = ClientNetworkingLogin.NetworkLoginManager()
self.network_engine = ClientNetworking.NetworkEngine( self, bandwidth_manager, session_manager, domain_manager, login_manager )
self.CallToThreadLongRunning( self.network_engine.MainLoop )
self.tag_censorship_manager = ClientCaches.TagCensorshipManager( self )
self.tag_siblings_manager = ClientCaches.TagSiblingsManager( self )
self.tag_parents_manager = ClientCaches.TagParentsManager( self )
self._managers[ 'undo' ] = ClientCaches.UndoManager( self )
self.server_session_manager = HydrusSessions.HydrusSessionManagerServer()
self.bitmap_manager = ClientCaches.BitmapManager( self )
self.local_booru_manager = ClientCaches.LocalBooruCache( self )
self.client_api_manager = ClientAPI.APIManager()
self._cookies = {}
self._job_scheduler = HydrusThreading.JobScheduler( self )
self._job_scheduler.start()
def _GetCallToThread( self ):
for call_to_thread in self._call_to_threads:
if not call_to_thread.CurrentlyWorking():
return call_to_thread
if len( self._call_to_threads ) > 100:
raise Exception( 'Too many call to threads!' )
call_to_thread = HydrusThreading.THREADCallToThread( self, 'CallToThread' )
self._call_to_threads.append( call_to_thread )
call_to_thread.start()
return call_to_thread
def _SetupWx( self ):
self.locale = wx.Locale( wx.LANGUAGE_DEFAULT ) # Very important to init this here and keep it non garbage collected
CC.GlobalBMPs.STATICInitialise()
self.frame_icon = wx.Icon( os.path.join( HC.STATIC_DIR, 'hydrus_32_non-transparent.png' ), wx.BITMAP_TYPE_PNG )
def pub( self, topic, *args, **kwargs ):
pass
def pubimmediate( self, topic, *args, **kwargs ):
self._pubsub.pubimmediate( topic, *args, **kwargs )
def sub( self, object, method_name, topic ):
self._pubsub.sub( object, method_name, topic )
def AcquirePageKey( self ):
return HydrusData.GenerateKey()
def CallBlockingToWX( self, win, func, *args, **kwargs ):
def wx_code( win, job_key ):
try:
if win is not None and not win:
raise HydrusExceptions.WXDeadWindowException( 'Parent Window was destroyed before wx command was called!' )
result = func( *args, **kwargs )
job_key.SetVariable( 'result', result )
except ( HydrusExceptions.WXDeadWindowException, HydrusExceptions.InsufficientCredentialsException, HydrusExceptions.ShutdownException ) as e:
job_key.SetVariable( 'error', e )
except Exception as e:
job_key.SetVariable( 'error', e )
HydrusData.Print( 'CallBlockingToWX just caught this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
finally:
job_key.Finish()
job_key = ClientThreading.JobKey()
job_key.Begin()
wx.CallAfter( wx_code, win, job_key )
while not job_key.IsDone():
if HG.model_shutdown:
raise HydrusExceptions.ShutdownException( 'Application is shutting down!' )
time.sleep( 0.05 )
if job_key.HasVariable( 'result' ):
# result can be None, for wx_code that has no return variable
result = job_key.GetIfHasVariable( 'result' )
return result
error = job_key.GetIfHasVariable( 'error' )
if error is not None:
raise error
raise HydrusExceptions.ShutdownException()
def CallToThread( self, callable, *args, **kwargs ):
call_to_thread = self._GetCallToThread()
call_to_thread.put( callable, *args, **kwargs )
CallToThreadLongRunning = CallToThread
def CallLater( self, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.SchedulableJob( self, self._job_scheduler, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallLaterWXSafe( self, window, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareJob( self, self._job_scheduler, window, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeating( self, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.RepeatingJob( self, self._job_scheduler, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeatingWXSafe( self, window, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareRepeatingJob( self, self._job_scheduler, window, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def ClearWrites( self, name ):
if name in self._writes:
del self._writes[ name ]
def DBCurrentlyDoingJob( self ):
return False
def GetFilesDir( self ):
return self._server_files_dir
def GetNewOptions( self ):
return self.new_options
def GetManager( self, manager_type ):
return self._managers[ manager_type ]
def GetWrite( self, name ):
write = self._writes[ name ]
del self._writes[ name ]
return write
def ImportURLFromAPI( self, url, service_keys_to_tags, destination_page_name, show_destination_page ):
normalised_url = self.network_engine.domain_manager.NormaliseURL( url )
human_result_text = '"{}" URL added successfully.'.format( normalised_url )
self.Write( 'import_url_test', url, service_keys_to_tags, destination_page_name, show_destination_page )
return ( normalised_url, human_result_text )
def IsBooted( self ):
return True
def IsCurrentPage( self, page_key ):
return False
def IsFirstStart( self ):
return True
def IShouldRegularlyUpdate( self, window ):
return True
def JustWokeFromSleep( self ):
return False
def ModelIsShutdown( self ):
return HG.model_shutdown
def PageAlive( self, page_key ):
return False
def PageClosedButNotDestroyed( self, page_key ):
return False
def Read( self, name, *args, **kwargs ):
return self._reads[ name ]
def RegisterUIUpdateWindow( self, window ):
pass
def ReleasePageKey( self, page_key ):
pass
def ReportDataUsed( self, num_bytes ):
pass
def ReportRequestUsed( self ):
pass
def ResetIdleTimer( self ): pass
def Run( self, window ):
# we are in wx thread here, we can do this
self._SetupWx()
suites = []
if self.only_run is None:
run_all = True
else:
run_all = False
# the gui stuff runs fine on its own but crashes in the full test if it is not early, wew
# something to do with the delayed button clicking stuff
if run_all or self.only_run == 'gui':
suites.append( unittest.TestLoader().loadTestsFromModule( TestDialogs ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientListBoxes ) )
if run_all or self.only_run == 'client_api':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientAPI ) )
if run_all or self.only_run == 'daemons':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientDaemons ) )
if run_all or self.only_run == 'data':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientConstants ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientData ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImportOptions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientThreading ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestFunctions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSerialisable ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSessions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusTags ) )
if run_all or self.only_run == 'db':
suites.append( unittest.TestLoader().loadTestsFromModule( TestDB ) )
if run_all or self.only_run == 'networking':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientNetworking ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNetworking ) )
if run_all or self.only_run == 'import':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImportSubscriptions ) )
if run_all or self.only_run == 'image':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImageHandling ) )
if run_all or self.only_run == 'nat':
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNATPunch ) )
if run_all or self.only_run == 'server':
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusServer ) )
suite = unittest.TestSuite( suites )
runner = unittest.TextTestRunner( verbosity = 2 )
def do_it():
try:
runner.run( suite )
finally:
wx.CallAfter( self.win.DestroyLater )
self.win.Show()
test_thread = threading.Thread( target = do_it )
test_thread.start()
def SetRead( self, name, value ):
self._reads[ name ] = value
def SetStatusBarDirty( self ):
pass
def SetWebCookies( self, name, value ):
self._cookies[ name ] = value
    def TidyUp( self ):
        # Give background workers a moment to release file handles before
        # deleting the temporary test database directory.
        time.sleep( 2 )
        HydrusPaths.DeletePath( self.db_dir )
    def ViewIsShutdown( self ):
        # Mirror the real controller API by reporting the global shutdown flag.
        return HG.view_shutdown
    def WaitUntilModelFree( self ):
        # The test controller is never busy -- return immediately.
        return
    def WaitUntilViewFree( self ):
        # The test controller is never busy -- return immediately.
        return
    def Write( self, name, *args, **kwargs ):
        # Record the call so tests can assert on what was written.
        self._writes[ name ].append( ( args, kwargs ) )
def WriteSynchronous( self, name, *args, **kwargs ):
self._writes[ name ].append( ( args, kwargs ) )
if name == 'import_file':
( file_import_job, ) = args
if file_import_job.GetHash().hex() == 'a593942cb7ea9ffcd8ccf2f0fa23c338e23bfecd9a3e508dfc0bcf07501ead08': # 'blarg' in sha256 hex
raise Exception( 'File failed to import for some reason!' )
else:
return ( CC.STATUS_SUCCESSFUL_AND_NEW, 'test note' )
|
pi_controller.py | import logging
import os
import time
import traceback
import sys
from datetime import date, datetime
from tanager_tcp.tanager_server import TanagerServer
from tanager_tcp.tanager_client import TanagerClient
from threading import Thread
from pi_feeder import goniometer
# Polling period (seconds) for the command loop in PiController._listen.
INTERVAL = 0.25
# All persistent configuration lives under ~/.tanager_config.
CONFIG_LOC = os.path.join(os.path.expanduser("~"), ".tanager_config")
# Encoder zero positions (incidence, emission, azimuth, tray), one per line.
ENCODER_CONFIG_PATH = os.path.join(CONFIG_LOC, "encoder_config.txt")
# Last known azimuth position, restored on startup.
AZ_CONFIG_PATH = os.path.join(CONFIG_LOC, "az_config.txt")
# Prefix for timestamped log file names.
LOG_PATH = os.path.join(CONFIG_LOC, "pi_feeder")
def main():
    """Entry point: set up file logging under CONFIG_LOC, then start the
    PiController command loop (blocks forever)."""
    # exist_ok avoids the isdir/mkdir race the original check had.
    os.makedirs(CONFIG_LOC, exist_ok=True)
    # Read the clock once so the hour/minute pair cannot straddle a minute
    # boundary relative to the date portion (the original called
    # datetime.now() twice).  Hour/minute stay unpadded to match the
    # original file-name format.
    started = datetime.now()
    stamp = started.strftime("%b-%d-%Y") + f"_{started.hour}_{started.minute}"
    log_path = f"{LOG_PATH}_{stamp}.log"
    logging.basicConfig(filename=log_path, level=logging.DEBUG)
    print(f"Logging to {log_path}")
    pi_controller = PiController()
    pi_controller.listen()
class PiController:
    def __init__(self):
        """Restore goniometer state from the on-disk config files (creating
        zeroed defaults on first run), re-home the azimuth axis, and start
        the TCP server thread that feeds the command queue."""
        try:
            with open(ENCODER_CONFIG_PATH, "r") as config_file:
                # One encoder zero position per line, in this fixed order.
                i_zero = float(config_file.readline())
                e_zero = float(config_file.readline())
                az_zero = float(config_file.readline())
                tray_zero = float(config_file.readline())
            self.goniometer = goniometer.Goniometer(i_zero, e_zero, az_zero, tray_zero)
        except (FileNotFoundError, NotADirectoryError):
            dir_path = os.path.split(ENCODER_CONFIG_PATH)[0]
            logging.info(f"Encoder config file not found, creating new one at {ENCODER_CONFIG_PATH}")
            if not os.path.isdir(dir_path):
                os.mkdir(dir_path)
            # Zeroed defaults; real zero positions arrive later via "configure".
            self.write_encoder_config(0, 0, 0, 0)
            self.goniometer = goniometer.Goniometer()
        current_az = 0
        try:
            with open(AZ_CONFIG_PATH, "r") as config_file:
                current_az = float(config_file.readline())
        except (FileNotFoundError, NotADirectoryError):
            dir_path = os.path.split(AZ_CONFIG_PATH)[0]
            logging.info(f"Az config file not found, creating new one at {AZ_CONFIG_PATH}")
            if not os.path.isdir(dir_path):
                os.mkdir(dir_path)
            self.write_az_config(0)
        logging.info("Homing azimuth")
        self.goniometer.home_azimuth()
        logging.info(f"Setting to last known azimuth: {current_az}")
        self.goniometer.set_position("azimuth", current_az)
        self.cmdfiles0 = []
        self.dir = "forward"
        self.server = TanagerServer(12345)
        self.client = TanagerClient(self.server.remote_server_address, 12345)
        # Background TCP listener: received messages land in self.server.queue
        # and are drained by _listen().
        thread = Thread(target=self.server.listen)
        thread.start()
def listen(self):
while True:
try:
self._listen()
except:
logging.info(traceback.print_exc())
    def _listen(self):
        """Drain the TCP message queue forever: decode each message, drive the
        goniometer accordingly, and reply to the control computer after every
        movement / configuration / query command."""
        logging.info("listening!")
        t = 0
        while True:
            while len(self.server.queue) > 0:
                # Point replies at wherever the last message came from.
                if self.server.remote_server_address != self.client.server_address:
                    logging.info("Setting control computer address:")
                    self.client.server_address = self.server.remote_server_address
                    logging.info(self.client.server_address)
                message = self.server.queue.pop(0)
                cmd, params = self.decrypt(message)
                # Strip digits from the command word -- presumably senders
                # suffix commands with sequence numbers; TODO confirm.
                for x in range(10):
                    cmd = cmd.replace(str(x), "")
                if cmd != "test":
                    logging.info(cmd)
                if cmd == "movetray":
                    # "steps" requests a raw step move; otherwise params[0]
                    # is a target position.
                    if "steps" in params:
                        steps = int(params[0])
                        self.goniometer.motors["sample tray"]["motor"].move_steps(steps)
                        filename = self.encrypt("donemovingtray")
                    else:
                        status = self.goniometer.set_position("sample tray", params[0])
                        if status["complete"]:
                            filename = self.encrypt("donemovingtray")
                        else:
                            filename = self.encrypt("failuremovingtray" + str(int(status["position"])))
                    self.send(filename)
                elif cmd == "moveemission":
                    if "steps" in params:
                        steps = int(params[0])
                        self.goniometer.motors["emission"]["motor"].move_steps(steps)
                        filename = self.encrypt("donemovingemission")
                    else:
                        # Axis not configured yet -> tell the control computer.
                        if self.goniometer.emission == None:
                            filename = self.encrypt("nopiconfig")
                        else:
                            status = self.goniometer.set_position("emission", int(params[0]))
                            if status["complete"]:
                                filename = self.encrypt("donemovingemission")
                            else:
                                filename = self.encrypt("failuremovingemission" + str(int(status["position"])))
                    self.send(filename)
                elif cmd == "moveincidence":
                    if "steps" in params:
                        steps = int(params[0])
                        self.goniometer.motors["incidence"]["motor"].move_steps(steps)
                        filename = self.encrypt("donemovingincidence")
                    else:
                        if self.goniometer.incidence == None:
                            filename = self.encrypt("nopiconfig")
                        else:
                            status = self.goniometer.set_position("incidence", int(params[0]))
                            if status["complete"]:
                                filename = self.encrypt("donemovingincidence")
                            else:
                                # NOTE(review): unlike the other axes this
                                # failure code is not cast to int first.
                                filename = self.encrypt("failuremovingincidence" + str(status["position"]))
                    self.send(filename)
                elif cmd == "moveazimuth":
                    if "steps" in params:
                        steps = int(params[0])
                        self.goniometer.motors["azimuth"]["motor"].move_steps(steps)
                        filename = self.encrypt("donemovingazimuth")
                    # NOTE(review): there is no `else:` here, unlike the
                    # moveemission/moveincidence branches, so the position
                    # branch below also runs after a raw step move -- confirm
                    # whether an `else:` was intended.
                    if self.goniometer.azimuth == None:
                        filename = self.encrypt("nopiconfig")
                    else:
                        status = self.goniometer.set_position("azimuth", int(params[0]))
                        filename = self.encrypt("donemovingazimuth" + str(int(status["position"])))
                        logging.info("Writing az config")
                        # Persist azimuth so it can be restored after restart.
                        self.write_az_config(self.goniometer.azimuth)
                    self.send(filename)
                elif cmd == "configure":
                    # params: incidence, emission, tray position; "WR" is
                    # encoded as -1 for the tray -- TODO confirm semantics
                    # against goniometer.configure().
                    if params[2].upper() == "WR":
                        params[2] = -1
                    self.goniometer.configure(float(params[0]), float(params[1]), int(params[2]))
                    # Persist the freshly established encoder zero positions.
                    self.write_encoder_config(
                        self.goniometer.motors["incidence"]["motor"].encoder._zero_position,
                        self.goniometer.motors["emission"]["motor"].encoder._zero_position,
                        self.goniometer.motors["azimuth"]["motor"].encoder._zero_position,
                        self.goniometer.motors["sample tray"]["motor"].encoder._zero_position,
                    )
                    filename = self.encrypt(
                        "piconfigsuccess",
                        [
                            str(self.goniometer.incidence),
                            str(self.goniometer.emission),
                            str(self.goniometer.azimuth),
                            str(self.goniometer.tray_pos),
                        ],
                    )
                    self.send(filename)
                elif cmd == "getcurrentposition":
                    self.goniometer.move_tray_to_nearest()
                    self.goniometer.update_position()
                    filename = self.encrypt(
                        "currentposition",
                        [
                            str(self.goniometer.incidence),
                            str(self.goniometer.emission),
                            str(self.goniometer.azimuth),
                            str(self.goniometer.tray_pos),
                        ],
                    )
                    self.send(filename)
            # Queue drained: wait one tick before polling again.  `t` only
            # accumulates elapsed time and is otherwise unused.
            t = t + INTERVAL
            time.sleep(INTERVAL)
def write_encoder_config(self, i, e, az, tray):
with open(ENCODER_CONFIG_PATH, "w+") as config_file:
config_file.write(f"{i}\n")
config_file.write(f"{e}\n")
config_file.write(f"{az}\n")
config_file.write(f"{tray}\n")
    def write_az_config(self, az):
        """Persist the current azimuth so __init__ can restore it on restart."""
        with open(AZ_CONFIG_PATH, "w+") as config_file:
            config_file.write(f"{az}\n")
def send(self, message):
sent = self.client.send(message)
while not sent:
logging.info("Failed to send message, retrying.")
logging.info(message)
time.sleep(2)
sent = self.client.send(message)
def encrypt(self, cmd, parameters=None):
filename = cmd
if parameters:
for param in parameters:
param = param.replace("/", "+")
param = param.replace("\\", "+")
param = param.replace(":", "=")
filename = filename + "&" + param
return filename
def decrypt(self, encrypted):
cmd = encrypted.split("&")[0]
params = encrypted.split("&")[1:]
i = 0
for param in params:
params[i] = param.replace("+", "\\").replace("=", ":")
params[i] = params[i].replace("++", "+")
i = i + 1
return cmd, params
# Script entry point: start logging and the controller loop.
if __name__ == "__main__":
    main()
|
sessions.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import contextlib
import itertools
import os
import threading
import time
from ptvsd.common import fmt, log, messaging, sockets, util
from ptvsd.adapter import components, launchers, servers
# Registry of all live Session objects, guarded by _lock.  _sessions_changed is
# set whenever the registry changes so wait_until_ended() can re-examine it.
_lock = threading.RLock()
_sessions = set()
_sessions_changed = threading.Event()
class Session(util.Observable):
"""A debug session involving an IDE, an adapter, a launcher, and a debug server.
The IDE and the adapter are always present, and at least one of launcher and debug
server is present, depending on the scenario.
"""
_counter = itertools.count(1)
    def __init__(self):
        # Imported here rather than at module level -- presumably to avoid a
        # circular import with ptvsd.adapter.ide; confirm before moving.
        from ptvsd.adapter import ide

        super(Session, self).__init__()
        self.lock = threading.RLock()
        # Monotonically increasing session number, shared across all instances.
        self.id = next(self._counter)
        # Condition built on self.lock: wait_for() waits on it, and
        # notify_changed() wakes all waiters.
        self._changed_condition = threading.Condition(self.lock)

        self.ide = components.missing(self, ide.IDE)
        """The IDE component. Always present."""

        self.launcher = components.missing(self, launchers.Launcher)
        """The launcher component. Always present in "launch" sessions, and never
        present in "attach" sessions.
        """

        self.server = components.missing(self, servers.Server)
        """The debug server component. Always present, unless this is a "launch"
        session with "noDebug".
        """

        self.no_debug = None
        """Whether this is a "noDebug" session."""

        self.pid = None
        """Process ID of the debuggee process."""

        self.debug_options = {}
        """Debug options as specified by "launch" or "attach" request."""

        self.is_finalizing = False
        """Whether finalize() has been invoked."""

        # Any observable change on this session wakes wait_for() callers.
        self.observers += [lambda *_: self.notify_changed()]
    def __str__(self):
        # e.g. "Session-1"; used throughout the log messages.
        return fmt("Session-{0}", self.id)
    def __enter__(self):
        """Lock the session for exclusive access."""
        self.lock.acquire()
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        """Unlock the session."""
        self.lock.release()
    def register(self):
        """Add this session to the global registry and wake wait_until_ended()."""
        with _lock:
            _sessions.add(self)
            _sessions_changed.set()
    def notify_changed(self):
        """Wake any wait_for() callers, and retire the session from the global
        registry once every component has disconnected."""
        with self:
            self._changed_condition.notify_all()

        # A session is considered ended once all components disconnect, and there
        # are no further incoming messages from anything to handle.
        # (This local name deliberately shadows the `components` module import,
        # but only within this method.)
        components = self.ide, self.launcher, self.server
        if all(not com or not com.is_connected for com in components):
            with _lock:
                if self in _sessions:
                    log.info("{0} has ended.", self)
                    _sessions.remove(self)
                    _sessions_changed.set()
    def wait_for(self, predicate, timeout=None):
        """Waits until predicate() becomes true.

        The predicate is invoked with the session locked. If satisfied, the method
        returns immediately. Otherwise, the lock is released (even if it was held
        at entry), and the method blocks waiting for some attribute of either self,
        self.ide, self.server, or self.launcher to change. On every change, session
        is re-locked and predicate is re-evaluated, until it is satisfied.

        While the session is unlocked, message handlers for components other than
        the one that is waiting can run, but message handlers for that one are still
        blocked.

        If timeout is not None, the method will unblock and return after that many
        seconds regardless of whether the predicate was satisfied. The method returns
        False if it timed out, and True otherwise.
        """

        def wait_for_timeout():
            time.sleep(timeout)
            wait_for_timeout.timed_out = True
            self.notify_changed()

        # The flag lives on the function object so that both the timer thread
        # and the waiting loop below see the same mutable state.
        wait_for_timeout.timed_out = False
        if timeout is not None:
            thread = threading.Thread(
                target=wait_for_timeout, name="Session.wait_for() timeout"
            )
            thread.daemon = True
            thread.start()

        with self:
            while not predicate():
                if wait_for_timeout.timed_out:
                    return False
                # Releases self.lock while waiting; reacquired on wakeup.
                self._changed_condition.wait()
        return True
    @contextlib.contextmanager
    def _accept_connection_from(self, what, address, timeout=None):
        """Sets up a listening socket, accepts an incoming connection on it, sets
        up a message stream over that connection, and passes it on to what().

        Can be used in a with-statement to obtain the actual address of the listener
        socket before blocking on accept()::

            with accept_connection_from_server(...) as (host, port):
                # listen() returned - listening on (host, port) now
                ...
            # accept() returned - connection established
        """
        host, port = address
        listener = sockets.create_server(host, port, timeout)
        # Re-read the bound address: port 0 means "pick any free port".
        host, port = listener.getsockname()
        log.info(
            "{0} waiting for incoming connection from {1} on {2}:{3}...",
            self,
            what.__name__,
            host,
            port,
        )
        # Body of the caller's with-statement runs here, before accept().
        yield host, port

        try:
            sock, (other_host, other_port) = listener.accept()
        finally:
            # The listener is single-use: close it whether or not accept succeeded.
            listener.close()
        log.info(
            "{0} accepted incoming connection {1} from {2}:{3}.",
            self,
            what.__name__,
            other_host,
            other_port,
        )
        stream = messaging.JsonIOStream.from_socket(sock, what)
        what(self, stream)
    def accept_connection_from_launcher(self, address=("127.0.0.1", 0)):
        # Loopback only; port 0 lets the OS pick a free port.  The launcher is
        # expected to connect back within 10 seconds.
        return self._accept_connection_from(launchers.Launcher, address, timeout=10)
    def finalize(self, why, terminate_debuggee=None):
        """Finalizes the debug session.

        If the server is present, sends "disconnect" request with "terminateDebuggee"
        set as specified request to it; waits for it to disconnect, allowing any
        remaining messages from it to be handled; and closes the server channel.

        If the launcher is present, sends "terminate" request to it, regardless of the
        value of terminate; waits for it to disconnect, allowing any remaining messages
        from it to be handled; and closes the launcher channel.

        If the IDE is present, sends "terminated" event to it.

        If terminate_debuggee=None, it is treated as True if the session has a Launcher
        component, and False otherwise.
        """
        # Idempotence guard: finalize() may be triggered from multiple paths.
        if self.is_finalizing:
            return
        self.is_finalizing = True
        log.info("{0}; finalizing {1}.", why, self)

        if terminate_debuggee is None:
            terminate_debuggee = bool(self.launcher)

        try:
            self._finalize(why, terminate_debuggee)
        except Exception:
            # Finalization should never fail, and if it does, the session is in an
            # indeterminate and likely unrecoverable state, so just fail fast.
            log.exception("Fatal error while finalizing {0}", self)
            os._exit(1)

        log.info("{0} finalized.", self)
    def _finalize(self, why, terminate_debuggee):
        """Worker for finalize(): tear down server, launcher, and IDE in order."""
        # If the IDE started a session, and then disconnected before issuing "launch"
        # or "attach", the main thread will be blocked waiting for the first server
        # connection to come in - unblock it, so that we can exit.
        servers.dont_wait_for_first_connection()

        if self.server:
            if self.server.is_connected:
                if terminate_debuggee and self.launcher and self.launcher.is_connected:
                    # If we were specifically asked to terminate the debuggee, and we
                    # can ask the launcher to kill it, do so instead of disconnecting
                    # from the server to prevent debuggee from running any more code.
                    self.launcher.terminate_debuggee()
                else:
                    # Otherwise, let the server handle it the best it can.
                    try:
                        self.server.channel.request(
                            "disconnect", {"terminateDebuggee": terminate_debuggee}
                        )
                    except Exception:
                        # Best-effort: the server may already be gone.
                        pass
            self.server.detach_from_session()

        if self.launcher and self.launcher.is_connected:
            # If there was a server, we just disconnected from it above, which should
            # cause the debuggee process to exit - so let's wait for that first.
            if self.server:
                log.info('{0} waiting for "exited" event...', self)
                if not self.wait_for(
                    lambda: self.launcher.exit_code is not None, timeout=5
                ):
                    log.warning('{0} timed out waiting for "exited" event.', self)

            # Terminate the debuggee process if it's still alive for any reason -
            # whether it's because there was no server to handle graceful shutdown,
            # or because the server couldn't handle it for some reason.
            self.launcher.terminate_debuggee()

            # Wait until the launcher message queue fully drains. There is no timeout
            # here, because the final "terminated" event will only come after reading
            # user input in wait-on-exit scenarios.
            log.info("{0} waiting for {1} to disconnect...", self, self.launcher)
            self.wait_for(lambda: not self.launcher.is_connected)

            try:
                self.launcher.channel.close()
            except Exception:
                log.exception()

        # Tell the IDE that debugging is over, but don't close the channel until it
        # tells us to, via the "disconnect" request.
        if self.ide and self.ide.is_connected:
            try:
                self.ide.channel.send_event("terminated")
            except Exception:
                # The IDE may have disconnected already; nothing left to notify.
                pass
def get(pid):
    """Return the registered session whose debuggee PID is *pid*, or None."""
    with _lock:
        return next((session for session in _sessions if session.pid == pid), None)
def wait_until_ended():
    """Blocks until all sessions have ended.

    A session ends when all components that it manages disconnect from it.
    """
    while True:
        _sessions_changed.wait()
        with _lock:
            _sessions_changed.clear()
            # Re-check under the lock: an empty registry means we're done.
            if not len(_sessions):
                return
|
add-geometry.py | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
import open3d.visualization.gui as gui
import open3d.visualization.rendering as rendering
import platform
import random
import threading
import time
# macOS gets special menubar handling below (its menu is application-global).
isMacOS = (platform.system() == "Darwin")
# This example shows two methods of adding geometry to an existing scene.
# 1) add via a UI callback (in this case a menu, but a button would be similar,
# you would call `button.set_on_clicked(self.on_menu_sphere_)` when
# configuring the button. See `on_menu_sphere()`.
# 2) add asynchronously by polling from another thread. GUI functions must be
# called from the UI thread, so use Application.post_to_main_thread().
# See `on_menu_random()`.
# Running the example will show a simple window with a Debug menu item with the
# two different options. The second method will add random spheres for
# 20 seconds, during which time you can be interacting with the scene, rotating,
# etc.
class SpheresApp:
MENU_SPHERE = 1
MENU_RANDOM = 2
MENU_QUIT = 3
    def __init__(self):
        """Create the window, the lit 3D scene with a default camera, and the
        (application-global) menubar with the Debug actions."""
        self._id = 0  # counter used to give each added sphere a unique name
        self.window = gui.Application.instance.create_window(
            "Add Spheres Example", 1024, 768)
        self.scene = gui.SceneWidget()
        self.scene.scene = rendering.Open3DScene(self.window.renderer)
        self.scene.scene.set_background([1, 1, 1, 1])
        self.scene.scene.scene.set_sun_light(
            [-1, -1, -1],  # direction
            [1, 1, 1],  # color
            100000)  # intensity
        self.scene.scene.scene.enable_sun_light(True)
        # Camera framed on the region where random spheres will appear.
        bbox = o3d.geometry.AxisAlignedBoundingBox([-10, -10, -10],
                                                   [10, 10, 10])
        self.scene.setup_camera(60, bbox, [0, 0, 0])
        self.window.add_child(self.scene)

        # The menu is global (because the macOS menu is global), so only create
        # it once, no matter how many windows are created
        if gui.Application.instance.menubar is None:
            if isMacOS:
                app_menu = gui.Menu()
                app_menu.add_item("Quit", SpheresApp.MENU_QUIT)
            debug_menu = gui.Menu()
            debug_menu.add_item("Add Sphere", SpheresApp.MENU_SPHERE)
            debug_menu.add_item("Add Random Spheres", SpheresApp.MENU_RANDOM)
            if not isMacOS:
                debug_menu.add_separator()
                debug_menu.add_item("Quit", SpheresApp.MENU_QUIT)
            menu = gui.Menu()
            if isMacOS:
                # macOS will name the first menu item for the running application
                # (in our case, probably "Python"), regardless of what we call
                # it. This is the application menu, and it is where the
                # About..., Preferences..., and Quit menu items typically go.
                menu.add_menu("Example", app_menu)
                menu.add_menu("Debug", debug_menu)
            else:
                menu.add_menu("Debug", debug_menu)
            gui.Application.instance.menubar = menu

        # The menubar is global, but we need to connect the menu items to the
        # window, so that the window can call the appropriate function when the
        # menu item is activated.
        self.window.set_on_menu_item_activated(SpheresApp.MENU_SPHERE,
                                               self._on_menu_sphere)
        self.window.set_on_menu_item_activated(SpheresApp.MENU_RANDOM,
                                               self._on_menu_random)
        self.window.set_on_menu_item_activated(SpheresApp.MENU_QUIT,
                                               self._on_menu_quit)
def add_sphere(self):
self._id += 1
mat = rendering.MaterialRecord()
mat.base_color = [
random.random(),
random.random(),
random.random(), 1.0
]
mat.shader = "defaultLit"
sphere = o3d.geometry.TriangleMesh.create_sphere(0.5)
sphere.compute_vertex_normals()
sphere.translate([
10.0 * random.uniform(-1.0, 1.0), 10.0 * random.uniform(-1.0, 1.0),
10.0 * random.uniform(-1.0, 1.0)
])
self.scene.scene.add_geometry("sphere" + str(self._id), sphere, mat)
    def _on_menu_sphere(self):
        """Menu handler for MENU_SPHERE."""
        # GUI callbacks happen on the main thread, so we can do everything
        # normally here.
        self.add_sphere()
    def _on_menu_random(self):
        """Menu handler for MENU_RANDOM: add one sphere per second for 20 s
        from a background thread."""
        # This adds spheres asynchronously. This pattern is useful if you have
        # data coming in from another source than user interaction.
        def thread_main():
            for _ in range(0, 20):
                # We can only modify GUI objects on the main thread, so we
                # need to post the function to call to the main thread.
                gui.Application.instance.post_to_main_thread(
                    self.window, self.add_sphere)
                time.sleep(1)

        threading.Thread(target=thread_main).start()
    def _on_menu_quit(self):
        """Menu handler for MENU_QUIT: close the application."""
        gui.Application.instance.quit()
def main():
    """Initialize the GUI framework, create the app window, and run the event loop."""
    gui.Application.instance.initialize()
    SpheresApp()
    gui.Application.instance.run()
# Script entry point.
if __name__ == "__main__":
    main()
|
toolbox_opencv.py | # -*- coding: utf-8 -*-
import remi
import remi.gui as gui
import cv2
from threading import Timer, Thread
import traceback
import time
import math
def default_icon(name, view_w=1, view_h=0.6):
    """
    A simple function to make a default svg icon for the widgets
    such icons can be replaced later with a good one
    """
    icon = gui.Svg(50, 30)
    icon.set_viewbox(-view_w / 2, -view_h / 2, view_w, view_h)
    # Centered label carrying the widget name.
    label = gui.SvgText(0, 0, name)
    label.style['font-size'] = "0.2px"
    label.style['text-anchor'] = "middle"
    # Border rectangle, inset by the stroke width so it stays fully visible.
    stroke_width = 0.01
    frame = gui.SvgRectangle(
        -view_w / 2 + stroke_width, -view_h / 2 + stroke_width,
        view_w - stroke_width * 2, view_h - stroke_width * 2)
    frame.set_fill("none")
    frame.set_stroke(0.01, 'black')
    icon.append([frame, label])
    return icon
# noinspection PyUnresolvedReferences
class OpencvWidget(object):
    """Mixin that wires up the on_new_image event so Opencv widgets can be
    chained into a processing pipeline (source -> filter -> display)."""

    def _setup(self):
        #this must be called after the Widget super constructor
        self.on_new_image.do = self.do

    def do(self, callback, *userdata, **kwuserdata):
        #this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
        if hasattr(self.on_new_image.event_method_bound, '_js_code'):
            self.on_new_image.event_source_instance.attributes[self.on_new_image.event_name] = self.on_new_image.event_method_bound._js_code%{
                'emitter_identifier':self.on_new_image.event_source_instance.identifier, 'event_name':self.on_new_image.event_name}
        self.on_new_image.callback = callback
        self.on_new_image.userdata = userdata
        self.on_new_image.kwuserdata = kwuserdata
        #here the callback is called immediately to make it possible link to the plc
        if callback is not None: #protection against the callback replacements in the editor
            callback(self, *userdata, **kwuserdata)

    @gui.decorate_set_on_listener("(self, emitter)")
    @gui.decorate_event
    def on_new_image(self, *args, **kwargs):
        # Fired whenever a widget produces a new frame; carries no payload --
        # listeners read the emitter's .img attribute instead.
        return ()
class OpencvImage(gui.Image, OpencvWidget):
""" OpencvImage widget.
Allows to read an image from file.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAFnQAABZ0B24EKIgAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAATOSURBVGiB5ZpdbBRVFMd/d6ZlpVCKQEF48jtEQkyMiRFNjEZe9AHUxMQHg/HBQOKjJD7ogxojwcYHIKAEP7AaYpQPQb6hQqpdqgIWRIpbChWU7W63y3a7O9vdmbk+lG637LYz087druH3tLn33HPOf+fOvWfujFi+eK7kFkKb7ATKTdXQD6HpBObcMZm5KGOgJ4y0LaBAcPWseh5cu33SklLJ6TeWk+0JA7fylHbDNHqYJ/9AExbdLCLJ/+8WcCW4GoPHWM8jcjMCG26s6+08w0HxHga3q8zRV1wIljwvV3IXzUU9C9lHnfyHRvEdNrqC9PzH8R5exO6SYoeYTxsP8aWvSanEUfBYYvM20tmmUnAUPFN2OTqZxWU/cikLjoJj3OvoJMr9viRTDhwFh8RSRych8bQvyTjRYfxES2IrphwYtw/HVbqDpzjHMhbxfcn+Tp7gLC+MOwE35KTBt92raU83kZMZ2vr38OqCrQTENM++XFVae0UDx8VqckzNt5kECLKK7eITQHgO7JaEFebTf1/mbGofOZkB4O/MKXZF3x6XP1eFh41OkFW0iteYQwgNkwgLsb0Vap7pTJ9gT+wdwtkLRX3t6aNcTLdwT80STz49ZWyjE2GhpwDjIScNjvau42RyB/1WtKRNxkrSFF/P3TWPIjzMMLWXyCWmzBLLdRHJdtBpBOnMBIlkLzqO6xo4STCxlSV1r7iO5UmwQQJDJAjI6UylDm2C5eSp5E5+T+4ikguRNHuwMT2Nt6RJa982Hq59kSlajasxjoLDop1mfQtXtTOkiGOKDDrV3CZrmSHnMVveyX3ycRZbz7r+A+LmVXZF36LTaJ3QFgMQyYbYH1vDsvp3XdmPKbhJ30Cr/hV9IlLUlxbX6RVXuMxvnGYnx/RNPGAv5UnzdaqYMqrP86kj7I+tJZrrcJWgG86lDrJk5grqq+9xtB11WzpY9SHHqjaWFHszNhZhcYEmfQMbpzxHi/5FSbtfEtvYHn3TV7EASSvK/tgaV7YlBbfpuzmhfU2OjOfg18R59la9z2fVK0gTz7cHE40cijeQsno9+3TDxXQLZ1P7HO2KBA+IFEf19WRE37iD21iEtGY2V7/EJa2V4/GPORxvIGXFnQePk6w0aI5vwZJjL3xFgg/oa4kK5y3BDd3aXzTqKzlirsOwkr74HIur2TMcv75pTJsRgrOkCWn+PtsaWgJzfgbqfHVbEiltTiV30D/GbTNC8M/658TEZf8z0YF5QK3/rm8mluviQOyDUftHCO7UTqjLpAqYT1lEn0//yJVMW8m+vGCJTVgUF+m+MiR6qpPhxEhbvRyKN5TsywvOYdAvetRmAoOiF4DqQ85LRiu/9n1T1J4XbIqs2gwKCYDqM3xLmgQTjUWla16w18J9wtQC09WGuJb9k8O9H41oKxBsqY1+MxowR32Ytv4fsAuKkRGLVtmpAWarDZEwr2HYw1VjgeBJ+hBgJsoXMFMOPxNM/uvSADBXbYjCizn5ggFmMCi8DFSGYB3lV3mIyhAMg1uU4m0KKkmwQPmKDQWCvZztKqOGwVVbIQWCK+ANvgBmqQ2hDf+okNkdQGkFJvKfHmqCVH2Z6+nRkIAJftVCNQkdcaOQHD6XtiXTuitgWiumQuZx+fgPED6yi1RbbEEAAAAASUVORK5CYII="
app_instance = None #the application instance used to send updates
img = None #the image data as numpy array
default_style = {'position':'absolute','left':'10px','top':'10px'}
image_source = None #the linked widget instance, get updated on image listener
#OpencvImage inherits gui.Image and so it already inherits attr_src.
# I'm redefining it in order to avoid editor_attribute_decorator
# and so preventing it to be shown in editor.
@property
def attr_src(self): return self.attributes.get('src', '')
@attr_src.setter
def attr_src(self, value): self.attributes['src'] = str(value)
def __init__(self, filename='', *args, **kwargs):
self.default_style.update(kwargs.get('style',{}))
kwargs['style'] = self.default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','200px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','180px'))
super(OpencvImage, self).__init__(filename, *args, **kwargs)
OpencvWidget._setup(self)
    def on_new_image_listener(self, emitter):
        """Listener endpoint: display the frame produced by an upstream widget."""
        if emitter.img is None:
            return
        self.set_image_data(emitter.img)
    def set_image(self, filename):
        # Overrides gui.Image.set_image; only records the filename here.
        # NOTE(review): it does not refresh the 'src' attribute itself --
        # subclasses (e.g. OpencvImRead) define `filename` as a property whose
        # setter performs the actual load; confirm that is the intent.
        self.filename = filename
    def set_image_data(self, img):
        """Store a new frame (numpy array), refresh the browser side, and fire
        on_new_image so chained widgets can process the frame."""
        self.img = img
        self.update()
        self.on_new_image()
def search_app_instance(self, node):
if issubclass(node.__class__, remi.server.App):
return node
if not hasattr(node, "get_parent"):
return None
return self.search_app_instance(node.get_parent())
    def update(self, *args):
        """Push the current frame to the browser: lazily resolve the owning
        App, then run JS that re-fetches this widget's image resource
        (served by get_image_data)."""
        if self.app_instance==None:
            self.app_instance = self.search_app_instance(self)
        if self.app_instance==None:
            # Widget not attached to an App yet; nothing to refresh.
            return
        # The timestamp query parameter defeats browser caching; the old blob
        # URL is revoked before a new one is created to avoid leaking memory.
        self.app_instance.execute_javascript("""
            url = '/%(id)s/get_image_data?index=%(frame_index)s';
            xhr = null;
            xhr = new XMLHttpRequest();
            xhr.open('GET', url, true);
            xhr.responseType = 'blob'
            xhr.onload = function(e){
                urlCreator = window.URL || window.webkitURL;
                urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
                imageUrl = urlCreator.createObjectURL(this.response);
                document.getElementById('%(id)s').src = imageUrl;
            }
            xhr.send();
            """ % {'id': self.identifier, 'frame_index':str(time.time())})
def get_image_data(self, index=0):
gui.Image.set_image(self, '/%(id)s/get_image_data?index=%(frame_index)s'% {'id': self.identifier, 'frame_index':str(time.time())})
self._set_updated()
try:
ret, png = cv2.imencode('.png', self.img)
if ret:
headers = {'Content-type': 'image/png', 'Cache-Control':'no-cache'}
return [png.tostring(), headers]
except:
pass
#print(traceback.format_exc())
return None, None
class OpencvImRead(OpencvImage, OpencvWidget):
    """ OpencvImRead widget.
        Allows to read an image from file.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    @property
    @gui.editor_attribute_decorator("WidgetSpecific",'''Image local filename''', 'file', {})
    def filename(self):
        return self.__filename

    @filename.setter
    def filename(self, value):
        self.__filename = value
        # Only hit the disk when a non-empty path was supplied; loading fires
        # on_new_image via set_image_data.
        if len(value) > 0:
            self.set_image_data(cv2.imread(value, cv2.IMREAD_COLOR))

    def __init__(self, filename='', *args, **kwargs):
        """Create the widget and (if *filename* is non-empty) load the image."""
        # Bug fix: copy before merging -- the original mutated the class-level
        # default_style dict shared with OpencvImage and every other instance.
        style = dict(self.default_style)
        style.update(kwargs.get('style', {}))
        kwargs['style'] = style
        kwargs['width'] = style.get('width', kwargs.get('width', '200px'))
        kwargs['height'] = style.get('height', kwargs.get('height', '180px'))
        super(OpencvImRead, self).__init__("", *args, **kwargs)
        OpencvWidget._setup(self)
        self.filename = filename
class OpencvVideo(OpencvImage):
""" OpencvVideo widget.
Opens a video source and dispatches the image frame by generating on_new_image event.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFoAAAAuCAYAAACoGw7VAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAKyAAACsgBvRoNowAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAXdSURBVHic7ZptTFNnFMf/z21rX1baQi0MBGojoesUJeJLNBhw6oddk5YuY2LMMjMSzLIYDCZLtsTEGbPpwvi0LQsSpgkpYRFDhpGYRVCnxCWObTi/YJ2ogKuDXspLwd7bPvsAmikKvaXlUuD3pcl9Oed//3l67rknD8ESMcXpdBqDweApIrWQ+U5xcbGM5/kMnucZsfcSQjYyDFNJKdUtGf0SWJY1MwzzPiHkHUqpiRDCUEqp2DiEEA2ARAB98ujLjF92795tk8lkVZTSXADJABhCJtbi099IWTIaQFlZmeLRo0fVAN6mlKbEIseiLx1Op9MoCEILgDUA1DFK0ye6wC8kWJbNFQThJoA8xM5kAIu4dNjt9g8opScBhFsqPJRSr5gchJDXAJgAkEVn9NGjR5mOjo5vQ6HQe4SQpDBv8wDYc/78+SticpWVlSn6+vqOE0IORaVGFxUVrQqFQvnRiBVj1AAOUUpXUUqfMAwj/P8kpZTg+fdWcPL49yMjI59fvnx5PJKkDofjzagYbbfbP5n8Gy5UgsFgcNWFCxfuRxqA4Xn+BKXUEk1VS0yF4Xm+/N69ex39/f03BUFwUEplUotaiMj9fr+/vLw8KT09PY9l2R+2bdsWGB4e/lGr1VYSQh7MJnhBTw/e6ukBAFxLS8PPmZkAgDvFdzCwZgAAkHUuC8v/XD7Lx5j/POuje3p6UF1dnVhaWppSU1PzUW9v7x+Dg4O/CYJgn3xJiCbN74eV42DlOKwYHX12fMgyBM7KgbNyGE+K6P0Sd0xp7wKBAFpbW+Wtra2JWVlZiXa7/cy6devGfD7fKZ1O9w0h5N9wg9dnZ6M+O3vK8byv8mYpO/6Yto92u92oqqoyaDQaQ0FBwadFRUUfcxz3u8FgOAngEiFE9ERrsRLWJ7jf70dLS4viwIEDxmPHju28evVqvc/nuz82NnaEUhpu07+oET3rcLvdqKysXH7w4MGMxsbGz7xeb9e+fftyYyFuIRHxJ/jg4CAaGhpUHo9HtX79elM0RS1EFvX0bi5ZMnqOWDJ6jpgXY1KLxYKSkhKkpaVBrY7u/D0QCODx48doa2vDlSuippxRRXKjLRYLKioq4HK50N3dHZMcKSkp2LlzJ6xWK6qrq2OSYyYkLx379+9HXV1dzEwGAI/HA5fLBavViuTk5JjlmQ5JjU5ISIBOp8ODB7OaXYUFpRSdnZ3IycmJea6XIanRK1eunBOTn+L1emEySdPyS146QqHQjNe0l7Sja2vXrHPxPI9ly5bNOk4kSG50OHCvc7hech1nj5wFl8pJLSci4sJoAOCVPLzpXjQfbkbbh20IqAJSSxJFxO2d2WyGw+Hwbdq0abyxsfFhNEVNx3jCONwb3eh9oxd5zXmwXbMBcTCsFWW0QqHA5s2bqcPh8JpMpod6vf5LmUx2rqmpqSJWAl8GZSj8ej9uvHsDt7ffxo6aHUjsS5xLCaIJy+jU1FSwLDtSWFjIA2jS6/XHCSF/Pz1vt9tjJnA6eBUP74qJcpLxVwby6/OhGFdIomUmXmk0wzDIycmhe/fuHUhPT/dptdpKhmHOEELG5lJgOIxrJ8uJbbKc/GKTWtIUphidlJSEXbt2jbAsO8YwzCW9Xv8FIeSWFOLEQGUT5aR9Tzs0Pg3MnWapJT2HHJjYZL127Vo4nU7OYrEMajSar5VK5WlCyOhMAeYLylElDP8YsL12O3T9OqnlTEGu0+k0tbW1AzKZrNVgMJwghHRILUoM8idyaIY02NqwFZm3MqWW80rkCoXisNForCOEDEktRgwkRKAeVmN122rkXswFCc3vPfVyQsh3Uo
sQi3pYjdSuVOS78qEaUUktJywkn0eLQTmqhK5fh8LThfO+b36RuDCaCTLQerXId+XP6zo8HXFh9IbmDTA+NIIJxs1oZgpxYbSpO/63jcTvEokzorKiQ6HQRQDCjBe+gNlsztqyZUupzWbjo6FjJlQqlezu3bu/Ukp/EnMfwzBEEIT+2eT+D23+73+IM13aAAAAAElFTkSuQmCC"
capture = None #the cv2 VideoCapture
thread_stop_flag = False #a flag to stop the acquisition thread
thread = None # the update thread
@property
@gui.editor_attribute_decorator('WidgetSpecific','The refresh interval in Hz', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def framerate(self): return self.__framerate
@framerate.setter
def framerate(self, v): self.__framerate = v;
@property
@gui.editor_attribute_decorator('WidgetSpecific','The video source index', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def video_source(self): return self.__video_source
@video_source.setter
def video_source(self, v): self.__video_source = v; self.capture = cv2.VideoCapture(self.__video_source)
    def __init__(self, *args, **kwargs):
        """Open video source 0 at 10 Hz by default and start the background
        daemon thread that drives browser refreshes (see update())."""
        self.framerate = 10
        self.video_source = 0  # property setter opens the cv2.VideoCapture
        super(OpencvVideo, self).__init__("", *args, **kwargs)
        # Daemon thread so a hung capture does not block interpreter exit.
        self.thread = Thread(target=self.update)
        self.thread.daemon = True
        self.thread.start()
def set_image_data(self, image_data_as_numpy_array):
    """Store the latest frame without triggering a widget refresh.

    Overloads the base implementation on purpose: the acquisition thread
    drives browser updates itself, so only the raw frame is kept here.
    """
    self.img = image_data_as_numpy_array
def search_app_instance(self, node):
    """Walk up the widget hierarchy looking for the owning remi App.

    Returns the App instance, or None when the widget is not (yet)
    attached to an application.
    """
    current = node
    while True:
        if issubclass(current.__class__, remi.server.App):
            return current
        if not hasattr(current, "get_parent"):
            return None
        current = current.get_parent()
def __del__(self):
    # Signal the acquisition thread to exit its loop on the next wakeup.
    self.thread_stop_flag = True
    # NOTE(review): assumes the base class defines __del__ — confirm,
    # otherwise this raises AttributeError at finalization.
    super(OpencvVideo, self).__del__()
def update(self, *args):
    """Acquisition loop executed on the daemon thread.

    Sleeps according to ``framerate``, locates the owning App once, then
    injects javascript that makes the browser re-fetch the frame served
    by get_image_data.
    """
    while not self.thread_stop_flag:
        # The editor allows framerate == 0 (its default/min); fall back
        # to a 1 s poll instead of raising ZeroDivisionError, which
        # would silently kill this thread.
        fps = self.framerate
        time.sleep(1.0 / fps if fps > 0 else 1.0)
        if self.app_instance is None:
            self.app_instance = self.search_app_instance(self)
        if self.app_instance is None:
            continue
        # Serialize DOM updates with the rest of the App.
        with self.app_instance.update_lock:
            self.app_instance.execute_javascript("""
                var url = '/%(id)s/get_image_data?index=%(frame_index)s';
                var xhr = new XMLHttpRequest();
                xhr.open('GET', url, true);
                xhr.responseType = 'blob'
                xhr.onload = function(e){
                    var urlCreator = window.URL || window.webkitURL;
                    urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
                    var imageUrl = urlCreator.createObjectURL(this.response);
                    document.getElementById('%(id)s').src = imageUrl;
                }
                xhr.send();
                """ % {'id': self.identifier, 'frame_index':str(time.time())})
def get_image_data(self, index=0):
    """HTTP resource handler: grab a camera frame and serve it as PNG.

    Returns ``[png_bytes, headers]`` on success, ``(None, None)`` on any
    failure (no frame, encode error, dead capture).
    """
    gui.Image.set_image(self, '/%(id)s/get_image_data?index=%(frame_index)s'% {'id': self.identifier, 'frame_index':str(time.time())})
    self._set_updated()
    try:
        ret, frame = self.capture.read()
        if ret:
            self.set_image_data(frame)
            # Notify downstream Opencv widgets of the fresh frame.
            self.on_new_image()
            ret, png = cv2.imencode('.png', frame)
            if ret:
                headers = {'Content-type': 'image/png', 'Cache-Control':'no-cache'}
                # tobytes() is the supported API: numpy deprecated and
                # later removed the tostring() alias.
                return [png.tobytes(), headers]
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; real errors are logged below.
        print(traceback.format_exc())
    return None, None
class OpencvCrop(OpencvImage):
""" OpencvCrop widget.
Allows to crop an image.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
# Crop rectangle: (crop_x, crop_y) is the top-left corner, crop_w/crop_h
# the size. Each setter immediately re-crops the last received frame by
# re-invoking on_new_image_listener with the stored image_source.
@property
@gui.editor_attribute_decorator('WidgetSpecific','The x crop coordinate', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def crop_x(self): return self.__crop_x
@crop_x.setter
def crop_x(self, v): self.__crop_x = v; self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The y crop coordinate', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def crop_y(self): return self.__crop_y
@crop_y.setter
def crop_y(self, v): self.__crop_y = v; self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The width crop coordinate', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def crop_w(self): return self.__crop_w
@crop_w.setter
def crop_w(self, v): self.__crop_w = v; self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The height crop coordinate', int, {'default':0, 'min':0, 'max':65535, 'step':1})
def crop_h(self): return self.__crop_h
@crop_h.setter
def crop_h(self, v): self.__crop_h = v; self.on_new_image_listener(self.image_source)
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACsAAAAuCAYAAACxkOBzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAADpwAAA6cBPJS5GAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAfBSURBVFiFzZlrbJPnFcd/r28JSZrYcZwYUmeBEHCcmqFFrGqraWojPm2akHaRtq6sVOs6pn5A2odVW1mptBYJtqoq6tbLNgmNy9AkKHSFFAoFAqKAktJyJwQnhISEtLYTX+PLe/YhyRvbsRMTcLu/5EjPeZ7nvD8dnec8lyir3NXC/6mSJkPp+x0D4cm2AUDR6TFZa74+qgzFvHeQZGKa3QBgstawfPPurwym9+xJvrHisZz95//wU8L9nml23UxOLfSwmCOYuXnXQKNDt3N33rjMYOepu/aZE3YlL/OctPIj+SW/lsd5XDbeleOBw/vwD/Rl7auutrFYDeG7ce3eYZv4Ly2yFZhaew/zLo3yUV5O/bd6ecTZSLT7So4RCvUL5lPcc4mxUPDeYOvlZIZlHHoh7Xk5jXquUGuvoSQemnHcCmcjvs7Mb+VWVtgoFVkHRzDn5bQsHgGgwWrB1zt9oaTKlgiTiMXy8psV9jPlJyQoSrPFKeG88sNZHcajEcxGPQA1tirGbl7X+rojp9g29Bv8iUHN1rSwnuEr5+cO62URO5Xt9PFtwljp5RG2KzvxUzerQ//ALezWSq1dGhtPhbOBXewYep6LwTYCySGt32QyIeH88taQq6Ofb7Fd+XdeTlJVXGEm8KUHs3k8ZZbYq3ir8wU6zHuJyxgAQvqmqRM1L98z1tm56AGrjT7/sNa2WiyM9XdpoNmkSH47ftbIjnCbM4adfEkvoFCCGavU8U31B5RJVU5nfdHPafNtZFGsnEdZrtkf4iE+5VOtrWZEUmGOsBd0bew3vIpPuTVt8GF5gwZ5lO8kfkWdLE/ra/f/nWO+twipXmLJBxERFEUBYOXilezp20PQkT03ZWLcbEpLg37ded4zvJgVFCCijHJB18Y/jD9nr+ElksQBOOh9jQ+9mwip3nE/C/vpuN6hzbNUWGgKNE05ymAbiOW3k2mwgkqbYRMhxTvrpJgS5hP9v/incTV7/es55vsbSZk6JUmJ0D6SvoG4Fbe2IUpGjl6NnEQlmT9sp34315X8dxOAG7rTnK7YgWqc/qHO4k5Gg6Nae+XSlVT0Tt9sEokEPVyg3f9u/rCXdfnt+5mSYgEHYEy3+xf52X9tv9YuKy3DFXaNN1LS4NbgLUarRjkzupNA8ovZYYUk3conc4IFoBh4kPQVoMBR5ShjsamS5da5yVz4Hr8HMQveeB+Hva/PDhsnQlQZnXHgrJoH2NNN/Uv72Xdpn9ZudbZS6alMy1mv6tUi/Vnwffqi52aGTUys6ntWxcRvUgoclsNadEvmleCKutJ2MK9MLeioGuCIb8vMsCrT7ztzkgJYScvJzOguMyxD1OywANfCx4kmAzPBzl428lbxBPCkMqL7hPMJwne0C+s0WJUkIdWXG1bI7yCRtyykVYfU6BYVFVFpmjqVZcICJCV7Wk7A3uenAyNgS2lnRHd+xXwSiQSBQAB/mT9vt7rxP/r7iTquBxivEBNKjW6Lu4Wuri66B7uJ2qJ5uywcrB5IPaClRNdoNBKLxRiIDIzneJ4qHCxAKVA21ZyMrsfj4dy5cwyFh3JOzSZllbtaUBQilfepfGVKILUyqvoqrvZEsFVVUeX9AmxhMKWvmaKgHp2a/a0riYhS7NXnd6icI7ACoojC85GYbm0sRriri+cCAb43VEzngvkcmqeTDjUoil4Dl2KT7ut5NHzZ7f7x4Pz5IQH52G6XYRDJ+IXKypJnliy5+qrL9XtmuB8WVG83N2+JlJaqk1BJEE9tbRrox1arfPjss3KyoUGSIIM1NZEPXK4jLRZL9keMAki/x+k8HDMY5G2XS9
QUuBN2exrsGEj71q0SCgalbcMGuWyziYAcX7LkQsEpW2trrScbG6+EFEV2P/OMHNq2LQ3Wa7HEux0OXyrwR08+KZM6d+CAXDebJW40ypr6+u8WDLRlwYKS6w6HVwXZs2aNqKoqR3ftSoPtdThG/tLc/CdRFM12qrZWQsGgBty2YYOMgRxobp7bzSAfbXQ6XxKQ9qYm7eOZsOcXL+4BdKnRTYIcf+cdDTaRSMiRFStkwG4PAcp9f+QAWGIyOQFira2UlJZmHeMrKhoC1PfKy99k4iquA2IHD2pj9Ho9ypo1VN25U/KzurrWgsCaREoSgPGx3E/xwzpdL8BvL178o8fh0E4zFceOMeKbOiI+/PTTdNhsfL+8/BcFgTWIFHlMJhpmgO1R1cnHAnVfWdlfJ+0tw8N0bN2qjZs3bx7R+noa4/GWgsCGIXjbYsFeW5tzzJlAQLuhrrt0ab2nrs4P45cMOXIkfXAsRmU0WlMQ2BG4Yw4GGRkZydofKy6WXTdvnkgxpUXXduIEw7fH/4Hy+f79NFy7RnkwWFYQ2P54vL8uFMLT0ZG131deHgPSTt3rLl1af2Mid5f5fBzavJmD69ZRvHo1jlCIgYqK4azO7lUrKiubkwaDHHjqKa0MpZauroUL72Sb97rL9cpkGfOl1N8bDodvrdPZUhBYQBmuqhrzGwxycNUqOb5pk2xZu1aDPbt06eUc89Lq7m27PbzD5fpPy4IFJYUCBWCPy/WBqtNNO1kJyCG3+1CueW+43S+ecjrPv9LU9Du+ypPXn93uF047nRd6HA7/YHV1xFdZGfObzfE3m5tfm4u//wEhpcccTGhJQgAAAABJRU5ErkJggg=="
def __init__(self, *args, **kwargs):
    """Start with a degenerate (0, 0, 0, 0) crop rectangle."""
    # NOTE(review): each assignment runs the property setter, which calls
    # on_new_image_listener(self.image_source) before super().__init__()
    # — presumably image_source defaults to None on the base class, making
    # the call a no-op; confirm against OpencvImage.
    self.crop_x = 0
    self.crop_y = 0
    self.crop_w = 0
    self.crop_h = 0
    super(OpencvCrop, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter): #CROP
    """Crop the incoming frame to the configured rectangle and publish it.

    Silently ignores the call when there is no emitter or no frame yet.
    """
    if emitter is None:
        return
    source = emitter.img
    if source is None:
        return
    self.image_source = emitter
    top, bottom = self.crop_y, self.crop_y + self.crop_h
    left, right = self.crop_x, self.crop_x + self.crop_w
    self.img = source[top:bottom, left:right]
    self.set_image_data(self.img)
class OpencvThreshold(OpencvImage):
""" OpencvThreshold widget.
Allows to threashold an image.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAETSURBVGhD7ZYBDsMgCEV197+zG+m60EwBHXaCvKRZslnhlT/TnFIqr2sbHu/PbdhO+BLpUnymO2fQPIhIe0ccaRwLjIW/QXekW7IA9duKqETakjQrbG2CHHFKe4cVlpzCll5YzEwYzhJ8jSISpiZ4x3RrgqPScNen4xWjSYlJ+8V7LBtpaJKb4siUlxOWiP4C7PzXSGvIcX3jGiJhrqmRB6U9RaoHXIuMNCyUNHauk6wFpOtm0BQebYq7b5asdN8phxYUrzUwS7aHqrBWY+c+rQegjaTGl7B2Y3eIYrh6UyK9Mhfhu6cxC8pj7wl7ojXlmLAnalOGb/pfhA0TkfZOCHsnhL0Twt4JYe+EsHdC2DcpPQHUiTG7/qs9SwAAAABJRU5ErkJggg=="
# Binarization threshold in [0, 255]. The setter accepts numeric strings
# like "125.0" (coerced through float then int) and immediately re-runs
# the thresholding on the last received frame.
@property
@gui.editor_attribute_decorator('WidgetSpecific','The threshold value to binarize image', int, {'default':125, 'min':0, 'max':255, 'step':1})
def threshold(self): return self.__threshold
@threshold.setter
def threshold(self, v): self.__threshold = int(float(v)); self.on_new_image_listener(self.image_source)
def __init__(self, *args, **kwargs):
    """Build the widget and default the threshold to 125."""
    super(OpencvThreshold, self).__init__("", *args, **kwargs)
    # Runs the property setter; with no image_source yet the re-filter
    # call inside it is a no-op.
    self.threshold = 125
def on_new_image_listener(self, emitter): #THRESHOLD
    """Binarize the incoming frame with the configured threshold and publish it."""
    if emitter is None:
        return
    frame = emitter.img
    if frame is None:
        return
    self.image_source = emitter
    # cv2.threshold wants a single channel: convert color frames first.
    if len(frame.shape) > 2:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, self.img = cv2.threshold(frame, self.threshold, 255, cv2.THRESH_BINARY)
    self.set_image_data(self.img)
'''
class OpencvSimpleBlobDetector(OpencvImage):
""" OpencvSimpleBlobDetector widget.
Allows to get blobs in an image.
Receives an image on on_new_image_listener.
The event on_blobs_detected can be connected to a listener further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAETSURBVGhD7ZYBDsMgCEV197+zG+m60EwBHXaCvKRZslnhlT/TnFIqr2sbHu/PbdhO+BLpUnymO2fQPIhIe0ccaRwLjIW/QXekW7IA9duKqETakjQrbG2CHHFKe4cVlpzCll5YzEwYzhJ8jSISpiZ4x3RrgqPScNen4xWjSYlJ+8V7LBtpaJKb4siUlxOWiP4C7PzXSGvIcX3jGiJhrqmRB6U9RaoHXIuMNCyUNHauk6wFpOtm0BQebYq7b5asdN8phxYUrzUwS7aHqrBWY+c+rQegjaTGl7B2Y3eIYrh6UyK9Mhfhu6cxC8pj7wl7ojXlmLAnalOGb/pfhA0TkfZOCHsnhL0Twt4JYe+EsHdC2DcpPQHUiTG7/qs9SwAAAABJRU5ErkJggg=="
def __init__(self, *args, **kwargs):
super(OpencvSimpleBlobDetector, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter): #THRESHOLD
if emitter.img is None:
return
img = emitter.img
self.set_image_data(self.img)
params = cv2.SimpleBlobDetector_Params()
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
# I loghi appaiono di colore bianco
params.minThreshold = 100 # the graylevel of images
params.maxThreshold = 255
params.filterByColor = False
#params.blobColor = 255
# Filter by Area
params.filterByArea = True
params.minArea = 20
detector = cv2.SimpleBlobDetector_create(params) #SimpleBlobDetector()
# Detect blobs.
keypoints = detector.detect(diff_images.astype(np.uint8))
for k in keypoints:
cv2.circle(img, (int(k.pt[0]), int(k.pt[1])), 20, (255,0,0), 5)
'''
class OpencvSplit(OpencvImage):
""" OpencvSplit widget.
Splits the image channels and generates a signal for each one to dispatch the results.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
The events:
- on_new_image_first_component
- on_new_image_second_component
- on_new_image_third_component
dispatch each one a single channel.
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAABDCAYAAAALU4KYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAYtSURBVHhe7ZtLSBVfHMd/aklmqWRmLVwYBYEo6EpcuVFoZUm2lkAQdNlS6A+6qjY+iFBKMDQQBEsqNRNDekqJimgvHz3IZ5aWZVbm9/c/M111nvfOqXvn3g8Mc8445Pid3+v8zhS2b9++NQoipqenxcgZwsU5hJeoFhgREUH79+/ni25jamqKfv78yWOnLVAVEOK1trbyRbdx7NgxFhGEXNjPsCXg3Nwc9fT00J07d9Q3GuxYEvDr16909uxZyszMpFOnTlFRURFlZWVRaWkpLSwsiLuCE1MB19bWqLi4mC5evEi/fv0SV//n5s2bLKgSoIMRUwGvX79Ovb29YraVgYEBamhoELPgw1RAI/EUrNzjVkzLmIKCAvr+/buYBQeDg4NiZI6pBR46dEiMQmhhaoHd3d1UWVnJ4/DwcNq7dy+P3QZKNCVJ2rFASyuRvLw8PkO88+fP89htnD59mkUEjrpwCGOkCIjaMVhwXEBFPJyVw804JqCRWG4WMRQDfcRUwNHRUTHyDbdaoaGANTU1VFhYKGb6uD3OGaFbB547d47q6+vp27dvlJqaytf06kBPASsqKsQoONC0QHRgGhsbWTwrhIWFiVHwsUXAL1++UHV1NS0uLoor3gFRd+zY4crD02C2uPCZM2foypUr/EMFMxf2pLy8nM+RkZGUnp7OY7fR39+vdqg2WODy8nJQ9/a8YYOASBoTExNiJp/o6Gg6ePAgt8x2794trgYWGwR8+PChGMll+/btlJ2dzZtSJ0+epBMnTlBJSQnv30ZFRYm7AgNVQPTCnj17JmZyyc/P5x2+zdn7yJEjLCj6joGC+qTYulT6YXaBENu2bePEYUZKSgolJyeL2VYOHDhAGRkZYub/qAJ6u++Bb2rgkjhbqQeNxFOwco+/oAr448cPMbIOrA6HFeEU4uLixEifPXv2iJH/owpod3McFudNrJqfnxcjfWZnZ8XI/1EV2PzVgRGwOKsuu5kXL16IkT5W7nGCly9f0v37973yPgVVQDsdFW/FA3jo4eFhMdvK2NgYDQ0NiZkcVldXqampib+oaGtro7q6OlpZWRE/tYd9H1zH1zLjxo0bdPfuXf5DFGAFDx48oJaWFnFFDp8+faJLly7xS1J+/+vXr3V3JM2IWF8N/IcBLBCljBaJiYl83rlzJx09epQtUA/sIwPcg5JEC/yut2/f0qNHj7j2xPc1XV1dvAqS2VuEdTc3N9O7d+/ElT98/PiRkpKSLCUwzy9ebZuSk0Uu4u7MzAw/kJ0YbBdYWnt7O129elX3u0a07vDy7b5A22p4G/v+JggH+JQXbnrt2jXurCNkfP78WdyhzeTkJIcRO/xTAREyPnz4wD1IJyzw6dOndPnyZW65VVVVcaLA+h5WbgW4JcKKnUWF2g/EH6C3lFP6gQkJCfxgRpSVlfFZrx8IF0LLDDEQwsFaEC/RqIyJiaH4+Hg6fPgw/06r4QJfySIJIMb5UpIoYJ2ufM6ihWY/0CgxOAViDFpmsBRYBQRE+YA+JCwRSeTJkycc6NEVv337tqkgIyMj/G8+f/7cEfEAyiyrxbwqoOwOSEdHB3+gbmWrAN4AS4XgFy5c4GJXi8ePH3PZ4/TKZWlpiW7duiVmxqiq4aHhPlqHr6BMQSzyrPus8v79e64bEdtgqQoI9p2dnWzFMnj16pWlgl4VEOkbbqx1+AJcFP8twuoOnxZ4uVje1dbW0vj4OGdUuLcs8QBiHGK1WY9Art+ug/rLKRdDaYINLxTdekW/kyDR4WUZEY6HMju8BW/R6cYAhHMqWZgBr0TCM6ofpVrgvXv3LLWv/Bk8P7xID6kCoi5zAyiV3rx5I2YbkSYgzF9v3R
loIPsj42shTUCULGZrz0AC2b+vr0/M/iBNwL8V6P8WKGdQe27+u6QJ6ERzwN9AUY/605Ow9Vhl2gBLS0vjM5Z7aCgYsWvXLj6jcDbKXoFKbGws5eTkqJZoywJhVVp1ouehYOG9BCTYEvAs4qVmYbfiGQctubAdjh8/zmej1B/o5Obm8v4QkGaBwUJIQB8JCegjIQF9RJqAgbD96QQhAX1EmoCyN6n8BWl1IJC5Z/EvQQ2oeJhUAYOBUBb2CaLfU+9XvFpkb1cAAAAASUVORK5CYII="
def __init__(self, *args, **kwargs):
    """Build the widget and install the per-channel event binders."""
    super(OpencvSplit, self).__init__("", *args, **kwargs)
    # Replace each event's "do" with the custom binders below so that
    # connecting a listener immediately pushes the current channel data.
    self.on_new_image_first_component.do = self.do_first
    self.on_new_image_second_component.do = self.do_second
    self.on_new_image_third_component.do = self.do_third
def on_new_image_listener(self, emitter):
    """Store the incoming frame and fire one event per connected channel."""
    self.image_source = emitter
    self.set_image_data(emitter.img)
    # Only dispatch the channel events that actually have a listener.
    for channel_event in (self.on_new_image_first_component,
                          self.on_new_image_second_component,
                          self.on_new_image_third_component):
        if channel_event.callback is not None:
            channel_event()
def do_first(self, callback, *userdata, **kwuserdata):
    """Custom binder for the first-channel event.

    Registers *callback* on the event (mirroring the stock remi binder,
    including the javascript attribute wiring) and then invokes it
    immediately with the current channel-0 data, so downstream widgets
    are primed before the event ever fires on its own.
    """
    #this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
    if hasattr(self.on_new_image_first_component.event_method_bound, '_js_code'):
        self.on_new_image_first_component.event_source_instance.attributes[self.on_new_image_first_component.event_name] = self.on_new_image_first_component.event_method_bound._js_code%{
            'emitter_identifier':self.on_new_image_first_component.event_source_instance.identifier, 'event_name':self.on_new_image_first_component.event_name}
    self.on_new_image_first_component.callback = callback
    self.on_new_image_first_component.userdata = userdata
    self.on_new_image_first_component.kwuserdata = kwuserdata
    #here the callback is called immediately to make it possible link to the plc
    if callback is not None: #protection against the callback replacements in the editor
        if hasattr(self, "image_source"):
            if not self.image_source.img is None:
                self.img = cv2.split(self.image_source.img)[0]
                callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_first_component(self):
    """Event carrying channel 0 of the split frame.

    Refreshes self.img with the first channel before the event is
    dispatched; the empty tuple is the event-parameter pack expected by
    gui.decorate_event.
    """
    if hasattr(self, "image_source"):
        if not self.image_source.img is None:
            self.img = cv2.split(self.image_source.img)[0]
    return ()
def do_second(self, callback, *userdata, **kwuserdata):
    """Custom binder for the second-channel event (see do_first)."""
    #this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
    if hasattr(self.on_new_image_second_component.event_method_bound, '_js_code'):
        self.on_new_image_second_component.event_source_instance.attributes[self.on_new_image_second_component.event_name] = self.on_new_image_second_component.event_method_bound._js_code%{
            'emitter_identifier':self.on_new_image_second_component.event_source_instance.identifier, 'event_name':self.on_new_image_second_component.event_name}
    self.on_new_image_second_component.callback = callback
    self.on_new_image_second_component.userdata = userdata
    self.on_new_image_second_component.kwuserdata = kwuserdata
    #here the callback is called immediately to make it possible link to the plc
    if callback is not None: #protection against the callback replacements in the editor
        if hasattr(self, "image_source"):
            if not self.image_source.img is None:
                self.img = cv2.split(self.image_source.img)[1]
                callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_second_component(self):
    """Event carrying channel 1 of the split frame (see first component)."""
    if hasattr(self, "image_source"):
        if not self.image_source.img is None:
            self.img = cv2.split(self.image_source.img)[1]
    return ()
def do_third(self, callback, *userdata, **kwuserdata):
    """Custom binder for the third-channel event (see do_first)."""
    #this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
    if hasattr(self.on_new_image_third_component.event_method_bound, '_js_code'):
        self.on_new_image_third_component.event_source_instance.attributes[self.on_new_image_third_component.event_name] = self.on_new_image_third_component.event_method_bound._js_code%{
            'emitter_identifier':self.on_new_image_third_component.event_source_instance.identifier, 'event_name':self.on_new_image_third_component.event_name}
    self.on_new_image_third_component.callback = callback
    self.on_new_image_third_component.userdata = userdata
    self.on_new_image_third_component.kwuserdata = kwuserdata
    #here the callback is called immediately to make it possible link to the plc
    if callback is not None: #protection against the callback replacements in the editor
        if hasattr(self, "image_source"):
            if not self.image_source.img is None:
                self.img = cv2.split(self.image_source.img)[2]
                callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_third_component(self):
    """Event carrying channel 2 of the split frame (see first component)."""
    if hasattr(self, "image_source"):
        if not self.image_source.img is None:
            self.img = cv2.split(self.image_source.img)[2]
    return ()
class OpencvCvtColor(OpencvImage):
""" OpencvCvtColor widget.
Convert image colorspace.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAAA6CAYAAAAnft6RAAAABHNCSVQICAgIfAhkiAAAAAFzUkdCAK7OHOkAAAAEZ0FNQQAAsY8L/GEFAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAB3VJREFUeF7tm3tsW9Udx7/nXjtPQtMka5puXZu1hdCSsoZqLR1tUUtabWN9srA/OgnBgD0qhoAJNI2NfyahsvEP1TRNTNWQQOKhgtAe3aBdGQOBoBkUFbamtHnVSZw4duz4fa/Pfuf6xEkWO9dOfJLG5iNd+XeOHd97v+d3fr/fOddh+5qXcBQRr5wdkFZ+0OTr58yQlAcyTUdp3VKrs9CIDvWDJ0zLzrcHpgQsIfE2/PpVq7PQ+PfD+xAjEQWfT+ErjJwErMQQvsJPYzVOogrJES12shLQiTBuwREc5pvRhrtwO78HP+ZbsI8fRjm88lPFSRYCchzgP8Bm/jswJGRfkib8BW38LvqSZIAuRmwFXIfX0Ii3ZGsqDfgILXhWtooPWwGnE2+MRm7/mULFVsBq3iWtzNSgU1rFh62AHsq5dgziGmkVH7aF9Gqcoqz7fdlKz5/ZEXyM2y37vaH11muxYOuBF7AD57BXtqZyEdtJvIOyVXxktZQTZcom/B5b+FGrJhQYKMX7VBP+i/2E3i2x+gRjHsjoY6Vv2o7PgiS6PQFenrRzWgsLIevQQa8G3FQFJuCQ74wzJmDJKQ1PeA5YdqHxaO1xxHYka+KcXCQB3RKuH9enFa8Ymdc55r0cwfAn5+A9fxbenpDsXVjkJGAYIxhm3QhimDxw5su3cMCEd9lpbPjTadz4dg9a3nSh5cQ/Ed35Brz9MfmphYFtDOxn/8Fb+jPo1c6ScF4YLEIT2YkyXoWreT1q+Uqs4Tej2fwWjYZuGwM5nY21vY7GXekHoO8DDcGjrdCdTPZceWQdA0/pR3HMeSfa9eNwswsIMg+iJGMIPvLEHnRqH+CM/jJedDyEp0tuw+uOp+RfZsbnfj+jeIKGjQn4ne/I1pVPRgH/5ngSpx2/hZ+5ZU9mxHTuZ/+1BLejZK9fWplZtH/hxMO0An6kv4Z3tecQR0T25I/qtfaxs6Zp8rbZlcwUAaMsiJP604gwe0+ZCZ52XVqZGfwwp9w2r0y50hP6EQyyz2Qr/5gvLJFWZkafv1paanEt86Jz4wDF9Zl7/CQBY5QeOjS1e3uLr23GuT9mLsIv/lVHdfUm2VJDmJv49FA3+n42jKH7Auh4QJRmMyvLJgn4tn4MHqZ+b6/0HzvRfn8F4slltYURBc48Vob48zvBFFYwQ44oPnugB8HtMfBSq4JDZJ2B3u+6LDtXJgl4UXtXWmoRNd7i8DZ0/XA32rduRPuWr+LSfbtR038LSsrUxb/L9SNwPexC5HpD9owT2hTD5aU+2cqe1NVyigOiFJlLLCGb6rB43VI4FBbOYspeaHXB/aAHsVXpp6p5FYf3gI90SHpltqQEjNNCbZQNydbCJkbOMFAZRM9qD85/uxcdP++Gry0Es2b6ZBFtNtC10b7unUhKQIPN/Rp0hIbNV0ELRDq3mePIp6N77RDO3duFT37ViZ7f9GHgES/8eyKIrcwuQXDKbf7dIUql2ScUvam+8nFhGJVAxzdGrc7Z0BRIbq7qlxhuDV9n2RPx1fgR/84AjP0eaHtG4Ng9CkerH8Y2H2ItPkRXBagWjcIxUEGjm920HnSG0XW3CyN7woh/2USikgZjhqHUrOZU1kRQez5zKfVGxacwG5MDnjrNbHZXssW/ywXno4PQt8bAvpgAq6KLKKOD4g9bkoB2rQl9WwyOH/kR+cUlBL552ZqO09G7YhiuR/oR+locvGT2XiwIfj0GN82MbJiURFQSoFjk2BsCW5zFTdJVseUJ6PvJox7rgn9rn3xjMp3r3Rg87E
V8RX4H36xNwHOHR7amZ4KA+Rm9dPhvcEO7lQq98UcnWcNoSjoOBTF6fye85I9jdLa44f1ewJpyKghtiKN3jb2IM4wU2ROEAf3gKFjFLG6UrlKjDOn8ZS9Glntx8aZ+eA+pE0/Ay6msuS1AyXX6cygXMHFHH1hDfsKD9iWa1g8NwyCPNEX8VEzsOgPdN0//Mz6lAopyQGuOy1Z+0CjDNlCyWCTbKuFUBAR2hDHMMt+DUgGN1gGw+vwnJ7EhVk9HldVSS5y8fvBgZi9UKqC2Vl1xLvZzGuiYCxFDm2PoqwnI1mRSArIsi9Zs4Q76xuVqa8sxEeWPBJRhLqKE0jYsW5OZIKD9TnEuxOucYHRi1QgRl9GR36ufSviGOLrWD8rWOCkBtTzPZmNR5k3TfFNKh+r/cBHr5BFa6v7/7nXquTA0huAXpu6T5UrlT6lgJmK+KhxpW2XZc4G4rV46Zr+an57ql8rxzJn30jwXTnBUDuizPsZIlCncVk6DuJG6pKmU0e3kgxM2ffM7byfAS+ZWQEEFHbVJUxnGkgTCjeNrUsYJaeeF/X9PFhYhVo2nWlda9lwiAoh4qqMy///hxAWUa8lgocwD5wuRUOwfnM4Oro/ProITUCC2QoWQc0FBCihSmWovHKMgBRRcJQ/VFKyAIkqpzsgCZQIyU/0yzg5R1lQnTWWoEzA8/wIKL6xJmspQJqAWn38BBSIbq1yhKCukYdLaWlc9gXIhf6V1BQ+AyWcl6gQsEgo2C88NwP8A7JKh2GNdWekAAAAASUVORK5CYII="
cvt_types = {'COLOR_BGR2HSV':cv2.COLOR_BGR2HSV,'COLOR_HSV2BGR':cv2.COLOR_HSV2BGR, 'COLOR_RGB2BGR':cv2.COLOR_RGB2BGR, 'COLOR_RGB2GRAY':cv2.COLOR_RGB2GRAY, 'COLOR_BGR2GRAY':cv2.COLOR_BGR2GRAY, 'COLOR_RGB2HSV':cv2.COLOR_RGB2HSV}
@property
@gui.editor_attribute_decorator('WidgetSpecific','The conversion constant code', 'DropDown', {'possible_values': cvt_types.keys()})
def conversion_code(self):
    # Either a key of cvt_types (when set from the editor) or a raw cv2
    # constant (when set programmatically, e.g. in __init__).
    return self.__conversion_code
@conversion_code.setter
def conversion_code(self, v):
    self.__conversion_code = v
    # Re-run the conversion so the change is visible immediately.
    # NOTE(review): assumes image_source already exists when this fires
    # from __init__ (before super().__init__) — presumably a class-level
    # default on OpencvImage; confirm.
    self.on_new_image_listener(self.image_source)
def __init__(self, *args, **kwargs):
    # Default conversion is BGR->HSV, stored as the numeric cv2 constant.
    self.conversion_code = cv2.COLOR_BGR2HSV
    super(OpencvCvtColor, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
    """Convert the incoming frame to the configured colorspace and publish it."""
    if emitter is None:
        return
    frame = emitter.img
    if frame is None:
        return
    # The editor stores the selection as a constant *name*; map it back
    # to the numeric cv2 code when necessary.
    code = self.conversion_code
    if type(code) == str:
        code = self.cvt_types[code]
    self.set_image_data(cv2.cvtColor(frame, code))
class OpencvBitwiseNot(OpencvImage):
""" OpencvBitwiseNot widget.
Allows to invert an image mask.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("BitwiseNot")
def __init__(self, *args, **kwargs):
    """Build the widget with an empty initial image."""
    super(OpencvBitwiseNot, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
    """Publish the bitwise inversion of the incoming frame.

    A missing/None frame is logged and ignored rather than propagated.
    """
    try:
        self.set_image_data(cv2.bitwise_not(emitter.img))
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate.
        print(traceback.format_exc())
class BinaryOperator(object):
    """Mixin holding the two input images of a binary image operation.

    Subclasses override process() to combine img1 and img2; each input
    listener stores its operand and re-runs process().
    """
    img1 = None  # first operand, set by on_new_image_1_listener
    img2 = None  # second operand, set by on_new_image_2_listener

    def process(self):
        """No-op placeholder: overload to perform the actual operation.

        Runs only once both operands have been received.
        """
        if self.img1 is None or self.img2 is None:
            return

    def on_new_image_1_listener(self, emitter):
        """Receive the first operand and re-run process()."""
        try:
            self.img1 = emitter.img
            self.process()
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; other errors are logged, not raised.
            print(traceback.format_exc())

    def on_new_image_2_listener(self, emitter):
        """Receive the second operand and re-run process()."""
        try:
            self.img2 = emitter.img
            self.process()
        except Exception:
            print(traceback.format_exc())
class OpencvBitwiseAnd(OpencvImage, BinaryOperator):
    """ OpencvBitwiseAnd widget.
    Allows to do the AND of two images.
    - Receives the image on on_new_image_1_listener.
    - Receives the mask on on_new_image_2_listener.
    The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("BitwiseAND", 1.1)

    def __init__(self, *args, **kwargs):
        BinaryOperator.__init__(self)
        super(OpencvBitwiseAnd, self).__init__("", *args, **kwargs)

    def process(self):
        """Publish img1 masked by img2, once both inputs have arrived."""
        if self.img1 is None or self.img2 is None:
            return
        # Masked-copy idiom: AND-ing the image with itself under the mask
        # keeps only the pixels where the mask is nonzero.
        self.set_image_data(cv2.bitwise_and(self.img1, self.img1, mask=self.img2))
class OpencvBitwiseOr(OpencvImage, BinaryOperator):
    """ OpencvBitwiseOr widget.
    Allows to do the OR of two images.
    - Receives the image on on_new_image_1_listener.
    - Receives the mask on on_new_image_2_listener.
    The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("BitwiseOR")
    def __init__(self, *args, **kwargs):
        BinaryOperator.__init__(self)
        super(OpencvBitwiseOr, self).__init__("", *args, **kwargs)
    def process(self):
        if not self.img1 is None:
            if not self.img2 is None:
                # NOTE(review): OR-ing img1 with itself under a mask is
                # just a masked copy of img1 (identical to the AND widget's
                # result) — possibly cv2.bitwise_or(img1, img2) was
                # intended; confirm before changing behavior.
                self.set_image_data(cv2.bitwise_or(self.img1, self.img1, mask=self.img2))
class OpencvAddWeighted(OpencvImage, BinaryOperator):
""" OpencvAddWeighted widget.
Allows to do the add_weighted of two images.
- Receives first image on on_new_image_1_listener.
- Receives second mask on on_new_image_2_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEQAAAAuCAYAAACRfL+OAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAE6wAABOsB2CpbDQAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAe+SURBVGiB3ZpbbBxXGcd/Z2Zv9u56vXZ27RjbkJomDq1VwrUFJKQKqgghEakvIJpIiAcqhMRTkXjilQckKsEDykNf2lRFSCAELYoKIQkkaXNzyM2JYyfO+rJe79pe73qvszuHh3HWXu9lZrxrTPuTVto5c853vvmfM+f75syIY2NhyccIIfjVn24uvbbT9ko7nfk44Hjyx//0GP6DR/bSl5aIvncKKcst29kUZPTzDL38assG94ro6beh1LodW7eMQG+9x/9zHGYVVIp8ld9wWP6VAHMsM8K4+D7XeYX09B2ykQf/Cz9NkboxWFLy3HfGwj/aqZ2mggjKfFeeYIjLlbIQk7wkf0Efd3l77Sj5hUc77butCEACQtCLFM/s1E5TQT7LO1VibOU5fs/7HCC1cZzXOnfqQ1vIa51IXUeiopWdttq61RxuZx4wEeSA/FdTQ33yLtMMI6VgMv45W060m1j8IVLXyWoBFlOftNV2tO9DRsPXAZNF1SsSTQ25RNpWxx8FmgoSk81vxTU5bNqBt0PjhbE5e17tIU0FuSx+SJH6a0OafiLiS02N9/eu89rxCwQDOVtOLRQmuJ05zVT2Anobki07NBUkyTB/4XVyBGvK/8jv0BqIBSAEvPKtm/QEcmgla+mORDKVu0Akf5VUaZElbYrp3CVLbduFaR7yQHyDk/ydT3GBbiIk5EFmxFc2xDjXsN1geI3h/jUAnhpImjpSkkUeZM+zWpqvKl8pRVjVFgg6B0xttANTQQByBJng28aBsGY4FMxW/j8zssQLY3NcujVYKTswkGQ+7qeoqaxqc0Ty42T0lRo7ZamxULxFt3M/wmrnLWBJkJ2wvNZRdfy9o7f45vPTJNMevL4EuBb5ya+/yFppiVRpEZ3Ga0WqFGOxcI/97sO75W4FU0EkOlExQVSZQMVBtz7IgPwMribrB8DsYoCFuJ+BkBGapZQUPB+SdURI6EnOXRngcd6B1+PB63QBoEtJJp+v44MkVpwk7HoaVZi4LCT0fgDONMS/BuWO5vW30dC6ROeM+lvuqKeJicnKCAoUfLKXPnmIESXMAAFc+Gra61Jw6m9jvPryVVRPnIe5S6TKMQAeRwO8+++DANw7eZLBffsAmEskGDpxoq4/WT1JJH+dAx1NIptShNBZ8MwYx+5ZmD0BuvUboW5NjTynnD9mUjmHpHpDTaKTFnHSIk5GcZNQu+guD1HkSM2smY0F+PkbQwyOXqavr0RZDzHxMMTFm4NoJdWyk09Y1mbod43SoXbVuZIM+MfBvULFZX8EQmcg9pLlPmoEkUjecf6U+8pZSwY08iwpU0wq79EvR/mEfLbqAh6l/sMHsQGg9ShRlDki+XEOeb9efaJjEYLXQKmTOQduwsoXQOux1EdNgnDecZJ7yhnbzhZEhogyzn3lLCU0EsVHPMpdpiiz5o1tkCzNsarNGmuFJwY914yLdmTqN3Ctw37r11M1Q/IizTXlD01X/GZIdJbFY7JiFa1QoCQLVecVIZh7882qsnB3d+X//p4eFt56q+r84PHj6FIaAnTOUHYvM++aoNv3IsJRBCzskXc+AN80rI+YVq0S5ILyBnHx0LwDE3JKCvwSvNPgXQZnAZQSAhf7expPXVVRas4LIcCZAP8kuIwEL0WCaLmbAYfFbQ9HAXrP2xdkSr1orQMzPOswfAvcSUizOYjKDjY9vTPGCCvFLYWbYdghXBbtzBrhePn5ptUqgqyJKHPiln2Ht+PKwYEb4NlY4HzAOiCNXCSajlZVD/vCqMKIOGVdZym5Jc0XJaTvPvV2j3N6ikh+nKc6vmzNL0WHniuweg
R0d8NqFUHiYpoStUmRLQQwdHtTDAAnhihp0KXOwC+ro83sz2YZDBgpfTS1yNAPjoPmBv998M1Ak41tIwwfpFMNNqxThXsZwv+ExaMNq1SiTIba5wjb7HsM/uXacidgJWEUOoxcgdAN41Yx2eXXZJ5I/oY9H7tuG2tSAyqCZMWqPcP1CM4b0aAebsBKLtaRhv5FcFl75ZEsz7NSjFh2Edc69DUOwxVB8jSI41bpikNnky1FBWuzBMAhIYAxs0zQZZn54h2LhjfoegC++/W73vzb4jvv4IIx5ZvhBFzAloAx+vooijDGRZdb2jswRFkFs7QoXV4iZPKwWYWiQegirB+qPWXdigle800gBOCpLsoUM6QLadKFNJnitlnqALqx5OX2Zy5TvLPQW5tmtEcQIY1wawUHNaI0xYkxU9qObqT96rZsum32Gy2m9XBjeecNMG4zr01/LPmxDKH3q4r25vsQFesLLBjidbI73nbdBXXzAXTvPphx2exdhTr7UG3wIwvezffTbRJkBxHKThh+gpvd2QV2byale/tJlRN7F7hbs2RLRlwRRLWSBbUbBXsRB4xZYreNDSrjM/wPF31XrW2zVRrnBI6CgkSS7GvheyYde3ddEbY/etkJcs2oCOJKgyu9sxtUAk7v7r9EqupQAO3dnQTAIRHvApQ6da+u2luylCK9qi4CEih4Whyi0taE3gI+oE4uWJR1Xv/ZoKVhPTYWPgbiRQn8+XCsFVM7I77xa5FROhkVfmCvo0yrBGhxSGtpW1R/Nr4r8dCcDlpeS4JiM8K2RRABfDqxGw8bNhxoEx/tW2YX+C/W16LHG1wTwQAAAABJRU5ErkJggg=="
# Blend coefficients of cv2.addWeighted: result = alpha*img1 + beta*img2
# + gamma. Each setter re-runs process() immediately.
@property
@gui.editor_attribute_decorator('WidgetSpecific','The alpha value', float, {'default':0, 'min':0, 'max':1.0, 'step':0.0001})
def alpha(self): return self.__alpha
@alpha.setter
def alpha(self, v): self.__alpha = v; self.process()
@property
@gui.editor_attribute_decorator('WidgetSpecific','The beta value', float, {'default':0, 'min':0, 'max':1.0, 'step':0.0001})
def beta(self): return self.__beta
@beta.setter
def beta(self, v): self.__beta = v; self.process()
@property
@gui.editor_attribute_decorator('WidgetSpecific','The gamma value', float, {'default':0, 'min':0, 'max':1.0, 'step':0.0001})
def gamma(self): return self.__gamma
@gamma.setter
def gamma(self, v): self.__gamma = v; self.process()
def __init__(self, *args, **kwargs):
    """Default to an equal-weight blend (alpha=beta=0.5, gamma=0)."""
    # The setters call process(); img1/img2 are still the BinaryOperator
    # class-level None defaults here, so those calls are no-ops.
    self.alpha = 0.5
    self.beta = 0.5
    self.gamma = 0.0
    BinaryOperator.__init__(self)
    super(OpencvAddWeighted, self).__init__("", *args, **kwargs)
def process(self):
    """Publish alpha*img1 + beta*img2 + gamma once both inputs have arrived."""
    if self.img1 is None or self.img2 is None:
        return
    self.set_image_data(cv2.addWeighted(self.img1, self.__alpha, self.img2, self.__beta, self.__gamma))
class OpencvBilateralFilter(OpencvImage):
""" OpencvBilateralFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
border_type = {"BORDER_CONSTANT": cv2.BORDER_CONSTANT,
"BORDER_REPLICATE": cv2.BORDER_REPLICATE,
"BORDER_REFLECT": cv2.BORDER_REFLECT,
"BORDER_WRAP": cv2.BORDER_WRAP,
"BORDER_REFLECT_101": cv2.BORDER_REFLECT_101,
"BORDER_TRANSPARENT": cv2.BORDER_TRANSPARENT,
"BORDER_REFLECT101": cv2.BORDER_REFLECT101,
"BORDER_DEFAULT": cv2.BORDER_DEFAULT,
"BORDER_ISOLATED": cv2.BORDER_ISOLATED}
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD4AAAAuCAYAAABwF6rfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAGpgAABqYBuiC2sAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAW+SURBVGiB7dpdSFN/HMfx93GtMRIRL1LCiVuZrouw+VCQNWfhNpNCC4SuelDCpEF0E3QRXdRN0VpB4CC6CMGr6EnsQiGKWZsltQYaaJYxvCgWnZpytnnO/0Jc9bf5uK3/H/3c7fx+v/P7vtjZefjtCJIkKazArAFQFIVoNPq3a0lL1Go1giBMw2OxGN+/f1/UDsbGxigoKEhJcamsISsrC7VaTcZSJ5UkiZs3by51+LLjdrsJh8NLHr9keFFREbW1tX8F73a72bVrF0ajccn7WDIcYNOmTWnHt7e3LxsNy4TD73hFSe0For29nd27dy8bDUmAwzTearXidDpTglcUhevXr2M2m5OChiTBATZu3MiBAweSjlcUhRs3blBbW0tJSUnS9ps0OCQfP/NNW63WpKIhyXD4ib969eqy8DNom81GcXFxEiucTtLhMI1vaGhYMj7VaEgRHMBgMMTxsiwveJyiKLhcLux2e8rQkEI4/MQ7nc4F4WfQdXV1bN68OZWlpRYO0/jGxsZ58elEQxrgAHq9noaGBlwu1x9/84qicO3aNfbv358WNKQJDtPffHZ2NpOTk7PaJicnyc7OxmAwpKuc9MEBMjISTzdXWyqS3tn+Q1mFr7SswldaVuErLYIkSUo0GkUUxZRPFolE4uvav2ZmXX/t2rUpr2FmeXlNymf6JYlggiCkBf1rVuyhvgr/U4aGhnA4HAnbu7q6GBoaAqC7uxun07mgSUVRJBaLLaLMxcXhcMTrSpQ54aIo0t/fn7D96dOnfPjwAYBgMDjvZDOprq4mEAgsqO9S0t/fP+/JekEnt0gkwsDAAFNTU1RWVqJWqwE4ceIEOTk5s/orisLHjx95//49sixTUlJCfn4+AJ8+fSIWizE8PExGRga5ubnk5uYCMDIywvDwMAUFBfH1c0VRePv2LVu2bGFqaopgMEhhYSFjY2OMjo4Si8UoLi5e9J+H88K/fv2K3W4nKyuLYDCIXq+ns7MTQRA4e/YsTU1NHDx48LcxkiSxb98+TCYTGo0Gr9eLw+GgpaWFBw8eIIoit2/fJjMzk6amJhobGzl37hw+n4/y8nJevHhBdXU158+fJxqNYrPZaG1t5c6dO8iyzMDAAHV1dZhMJrRaLT6fj+bmZk6dOpU8uFqtpqOjg7y8PMLhMFVVVTx58gSLxZJwjEajwe/3o1KpAPB4PJw5c4aWlhba2tq4desWFy9epLS0FICenh78fj+PHz9GpVIxMTFBRUUFx48fZ/369QDk5OTw5s0btFotAH6/nzVrpssfGBjg2LFjyYVnZmaSl5cHwLp166isrGRwcHBOuCAI3L17l+7ubt69e8fExMScf+l6PB6+ffvGyZMn49tkWWZkZCQOP3z4cBwN8PDhw/jJNRwOEwqF5tf+kkXfwAiCMOvO69/p7OzE5XJx+fJlysrKGB8fx2azJewfiUQoLS2lubk5vq2trY3CwsI/9r937x6XLl3iypUrlJeXI4oi27dvX5RjUfBIJMLLly85cuTInP0CgQB79uyhqqoKYNbbFlqtlh8/fsQ/b926FbfbjdFojJ84JUlKuCobCAQwm82YzWZg+oqy2MwLHx8f5/Tp0+h0Onp6ejCZTOzYsWPOMTU1NbS2tqJSqRBFEa/X+1v7zp07uXDhAjU1NZhMJg4dOsT9+/ex2+1YrVa+fPlCb28vHR0d6PX6Wfu3WCwcPXoUrVZLOBye85KbKHM+pIRCIV6/fo0sywQCAQwGA/X19fGFwefPn6PT6cjPz2d0dJRQKERZWRkAXq+Xvr4+dDodFosFn8+H3W4HIBqN8ujRIz5//o
zFYqGoqAhFUfB4PAwODqLRaKioqMBoNCLLMl1dXdhstvjRAPDq1SuePXvGhg0b2Lt3L319fdTX1wPQ29vLtm3b/nipnXlISevT2X8hy3755/+eVfhKy4qFC5IkKSv2lc6/sfTzt/MPlclxbZFsdksAAAAASUVORK5CYII="
# cv2.bilateralFilter parameters. Each setter re-filters the last
# received frame by re-invoking on_new_image_listener.
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter diameter', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
def diameter(self):
    return self.__diameter
@diameter.setter
def diameter(self, v):
    self.__diameter = v
    self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter sigma color parameter', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
def sigma_color(self):
    return self.__sigma_color
@sigma_color.setter
def sigma_color(self, v):
    self.__sigma_color = v
    self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter sigma space parameter', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
def sigma_space(self):
    return self.__sigma_space
@sigma_space.setter
def sigma_space(self, v):
    self.__sigma_space = v
    self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter border parameter', 'DropDown', {'possible_values': border_type.keys()})
def border(self):
    # Either a key of border_type (from the editor) or a raw cv2 constant.
    return self.__border
@border.setter
def border(self, v):
    self.__border = v
    self.on_new_image_listener(self.image_source)
def __init__(self, diameter=2, sigma_color=0, sigma_space=0, border=cv2.BORDER_CONSTANT, *args, **kwargs):
    """Store the filter parameters and build the widget.

    Writes the private attributes directly (bypassing the property
    setters) so no re-filtering fires before any image was received.
    """
    self.__sigma_color = sigma_color
    self.__sigma_space = sigma_space
    self.__diameter = diameter
    self.__border = border
    super(OpencvBilateralFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
    """Apply a bilateral filter to the incoming frame and publish it.

    Any failure (no emitter, None frame, bad parameters) is logged and
    ignored instead of propagating to the event dispatcher.
    """
    try:
        self.image_source = emitter
        # Map the editor's constant-name string back to the cv2 value.
        border = self.border_type[self.border] if type(self.border) == str else self.border
        self.set_image_data(cv2.bilateralFilter(emitter.img, self.diameter, self.sigma_color, self.sigma_space, borderType=border))
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate.
        print(traceback.format_exc())
class OpencvBlurFilter(OpencvImage):
""" OpencvBlurFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("Blur")
# cv2.blur parameters. Each setter re-filters the last received frame.
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter kernel_size', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
def kernel_size(self):
    return self.__kernel_size
@kernel_size.setter
def kernel_size(self, v):
    self.__kernel_size = v
    self.on_new_image_listener(self.image_source)
@property
@gui.editor_attribute_decorator('WidgetSpecific','The filter border parameter', 'DropDown', {'possible_values': OpencvBilateralFilter.border_type.keys()})
def border(self):
    # Either a key of OpencvBilateralFilter.border_type or a cv2 constant.
    return self.__border
@border.setter
def border(self, v):
    self.__border = v
    self.on_new_image_listener(self.image_source)
def __init__(self, kernel_size=2, border=cv2.BORDER_CONSTANT, *args, **kwargs):
self.__kernel_size = kernel_size
self.__border = border
super(OpencvBlurFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
border = OpencvBilateralFilter.border_type[self.border] if type(self.border) == str else self.border
self.set_image_data(cv2.blur(emitter.img, (self.kernel_size,self.kernel_size), borderType=border))
except:
print(traceback.format_exc())
def on_kernel_size_listener(self, emitter, value=None):
v = emitter.get_value() if value is None else value
v = int(v)
self.kernel_size = v
if hasattr(self, "image_source"):
self.on_new_image_listener(self.image_source)
class OpencvDilateFilter(OpencvImage):
    """ OpencvDilateFilter widget.
        Morphologically dilates the image received on on_new_image_listener.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    morph_shape = {"MORPH_RECT": cv2.MORPH_RECT, "MORPH_CROSS": cv2.MORPH_CROSS, "MORPH_ELLIPSE": cv2.MORPH_ELLIPSE}
    icon = default_icon("Dilate")

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The kernel morph shape', 'DropDown', {'possible_values': morph_shape.keys()})
    def kernel_morph_shape(self):
        # cv2 morph-shape constant, or its string key in morph_shape.
        return self.__kernel_morph_shape
    @kernel_morph_shape.setter
    def kernel_morph_shape(self, v):
        self.__kernel_morph_shape = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter kernel_size', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def kernel_size(self):
        # Side length of the structuring element.
        return self.__kernel_size
    @kernel_size.setter
    def kernel_size(self, v):
        self.__kernel_size = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter iterations', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def iterations(self):
        return self.__iterations
    @iterations.setter
    def iterations(self, v):
        self.__iterations = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter border parameter', 'DropDown', {'possible_values': OpencvBilateralFilter.border_type.keys()})
    def border(self):
        # cv2 border constant, or its string key in OpencvBilateralFilter.border_type.
        return self.__border
    @border.setter
    def border(self, v):
        self.__border = v
        self.on_new_image_listener(self.image_source)

    def __init__(self, kernel_morph_shape=cv2.MORPH_RECT, kernel_size=2, iterations=1, border=cv2.BORDER_CONSTANT, *args, **kwargs):
        self.__kernel_morph_shape = kernel_morph_shape
        self.__kernel_size = kernel_size
        self.__iterations = iterations
        self.__border = border
        super(OpencvDilateFilter, self).__init__("", *args, **kwargs)

    def on_new_image_listener(self, emitter):
        """Dilate *emitter*'s image and publish the result."""
        try:
            self.image_source = emitter
            _kernel_morph_shape = self.morph_shape[self.kernel_morph_shape] if isinstance(self.kernel_morph_shape, str) else self.kernel_morph_shape
            kernel = cv2.getStructuringElement(_kernel_morph_shape, (self.kernel_size, self.kernel_size))
            border = OpencvBilateralFilter.border_type[self.border] if isinstance(self.border, str) else self.border
            self.set_image_data(cv2.dilate(emitter.img, kernel, iterations=self.iterations, borderType=border))
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())
class OpencvErodeFilter(OpencvDilateFilter):
    """ OpencvErodeFilter widget.
        Morphologically erodes the image received on on_new_image_listener;
        all parameters are inherited from OpencvDilateFilter.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("Erode")

    def on_new_image_listener(self, emitter):
        """Erode *emitter*'s image and publish the result."""
        try:
            self.image_source = emitter
            _kernel_morph_shape = self.morph_shape[self.kernel_morph_shape] if isinstance(self.kernel_morph_shape, str) else self.kernel_morph_shape
            kernel = cv2.getStructuringElement(_kernel_morph_shape, (self.kernel_size, self.kernel_size))
            border = OpencvBilateralFilter.border_type[self.border] if isinstance(self.border, str) else self.border
            self.set_image_data(cv2.erode(emitter.img, kernel, iterations=self.iterations, borderType=border))
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())
class OpencvLaplacianFilter(OpencvImage):
    """ OpencvLaplacianFilter widget.
        Applies cv2.Laplacian to the image received on on_new_image_listener.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("Laplacian")

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter border parameter', 'DropDown', {'possible_values': OpencvBilateralFilter.border_type.keys()})
    def border(self):
        # cv2 border constant, or its string key in OpencvBilateralFilter.border_type.
        return self.__border
    @border.setter
    def border(self, v):
        self.__border = v
        self.on_new_image_listener(self.image_source)

    def __init__(self, border=cv2.BORDER_CONSTANT, *args, **kwargs):
        self.__border = border
        super(OpencvLaplacianFilter, self).__init__("", *args, **kwargs)

    def on_new_image_listener(self, emitter):
        """Apply the Laplacian (ddepth=-1 keeps the source depth) and publish it."""
        try:
            self.image_source = emitter
            border = OpencvBilateralFilter.border_type[self.border] if isinstance(self.border, str) else self.border
            self.set_image_data(cv2.Laplacian(emitter.img, -1, borderType=border))
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())
class OpencvCanny(OpencvImage):
    """ OpencvCanny segmentation widget.
        Runs the Canny edge detector on the image received on on_new_image_listener.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("Canny")

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter threshold1', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def threshold1(self):
        # First hysteresis threshold of cv2.Canny.
        return self.__threshold1
    @threshold1.setter
    def threshold1(self, v):
        self.__threshold1 = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter threshold2', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def threshold2(self):
        # Second hysteresis threshold of cv2.Canny.
        return self.__threshold2
    @threshold2.setter
    def threshold2(self, v):
        self.__threshold2 = v
        self.on_new_image_listener(self.image_source)

    def __init__(self, threshold1=80, threshold2=160, *args, **kwargs):
        self.__threshold1 = threshold1
        self.__threshold2 = threshold2
        super(OpencvCanny, self).__init__("", *args, **kwargs)

    def on_new_image_listener(self, emitter):
        """Run cv2.Canny on *emitter*'s image and publish the edge map."""
        try:
            self.image_source = emitter
            self.set_image_data(cv2.Canny(emitter.img, self.threshold1, self.threshold2))
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())

    def _listener_value(self, emitter, value):
        # Shared helper for the two threshold listeners below.
        return int(emitter.get_value() if value is None else value)

    def on_threshold1_listener(self, emitter, value=None):
        """gui listener: update threshold1 and re-run if an image is available."""
        self.threshold1 = self._listener_value(emitter, value)
        if hasattr(self, "image_source"):
            self.on_new_image_listener(self.image_source)

    def on_threshold2_listener(self, emitter, value=None):
        """gui listener: update threshold2 and re-run if an image is available."""
        self.threshold2 = self._listener_value(emitter, value)
        if hasattr(self, "image_source"):
            self.on_new_image_listener(self.image_source)
#https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html
class OpencvFindContours(OpencvImage):
    """ OpencvFindContours segmentation widget.
        Finds contours (cv2.findContours) in the image received on
        on_new_image_listener, filters them by arc length, area, convexity
        and roundness, and draws the surviving contours.
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("FindContours",1.2)
    contours = None #the contours result of processing
    hierarchy = None #the hierarchy result of processing
    # NOTE(review): the "RETR_CCOMP " key carries a trailing space; kept as-is
    # because the string is the DropDown value users may already have saved.
    contour_retrieval_mode = {"RETR_LIST": cv2.RETR_LIST, "RETR_EXTERNAL": cv2.RETR_EXTERNAL, "RETR_CCOMP ": cv2.RETR_CCOMP, "RETR_TREE": cv2.RETR_TREE, "RETR_FLOODFILL": cv2.RETR_FLOODFILL}
    contour_approximation_method = {"CHAIN_APPROX_NONE":cv2.CHAIN_APPROX_NONE, "CHAIN_APPROX_SIMPLE": cv2.CHAIN_APPROX_SIMPLE, "CHAIN_APPROX_TC89_L1": cv2.CHAIN_APPROX_TC89_L1, "CHAIN_APPROX_TC89_KCOS": cv2.CHAIN_APPROX_TC89_KCOS}

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The contour retrieval mode parameter', 'DropDown', {'possible_values': contour_retrieval_mode.keys()})
    def retrieval_mode(self):
        return self.__retrieval_mode
    @retrieval_mode.setter
    def retrieval_mode(self, v):
        self.__retrieval_mode = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The contour approximation method parameter', 'DropDown', {'possible_values': contour_approximation_method.keys()})
    def approximation_method(self):
        return self.__approximation_method
    @approximation_method.setter
    def approximation_method(self, v):
        self.__approximation_method = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The minimum arc length of a contour', int, {'possible_values': '', 'min': 0, 'max': 9223372036854775807, 'default': 1, 'step': 1})
    def min_arc_length(self):
        return self.__min_arc_length
    @min_arc_length.setter
    def min_arc_length(self, v):
        self.__min_arc_length = v
        self.on_new_image_listener(self.image_source)

    # BUGFIX: max_arc_length was defined twice (the later copy, described as
    # "The maximum contour area", silently shadowed this one); a single
    # correctly-described property remains.
    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The maximum arc length of a contour', int, {'possible_values': '', 'min': 0, 'max': 9223372036854775807, 'default': 1, 'step': 1})
    def max_arc_length(self):
        return self.__max_arc_length
    @max_arc_length.setter
    def max_arc_length(self, v):
        self.__max_arc_length = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The minimum contour area', int, {'possible_values': '', 'min': 0, 'max': 9223372036854775807, 'default': 1, 'step': 1})
    def min_contour_area(self):
        return self.__min_contour_area
    @min_contour_area.setter
    def min_contour_area(self, v):
        self.__min_contour_area = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The maximum contour area', int, {'possible_values': '', 'min': 0, 'max': 9223372036854775807, 'default': 1, 'step': 1})
    def max_contour_area(self):
        return self.__max_contour_area
    @max_contour_area.setter
    def max_contour_area(self, v):
        self.__max_contour_area = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','When true, convex contours are discarded', bool, {})
    def discard_convex(self):
        return self.__discard_convex
    @discard_convex.setter
    def discard_convex(self, v):
        self.__discard_convex = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','When true, non-convex contours are discarded', bool, {})
    def discard_non_convex(self):
        return self.__discard_non_convex
    @discard_non_convex.setter
    def discard_non_convex(self, v):
        self.__discard_non_convex = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The minimum acceptable circularity', float, {'possible_values': '', 'min': 0.0, 'max': 1.0, 'default': 0.0, 'step': 0.01})
    def min_roundness(self):
        return self.__min_roundness
    @min_roundness.setter
    def min_roundness(self, v):
        self.__min_roundness = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The maximum acceptable circularity', float, {'possible_values': '', 'min': 0.0, 'max': 1.0, 'default': 0.0, 'step': 0.01})
    def max_roundness(self):
        return self.__max_roundness
    @max_roundness.setter
    def max_roundness(self, v):
        self.__max_roundness = v
        self.on_new_image_listener(self.image_source)

    def __init__(self, retrieval_mode=cv2.RETR_LIST, approximation_method=cv2.CHAIN_APPROX_SIMPLE, *args, **kwargs):
        self.__retrieval_mode = retrieval_mode
        self.__approximation_method = approximation_method
        self.__min_arc_length = 0
        self.__max_arc_length = 9223372036854775807
        self.__min_contour_area = 0
        self.__max_contour_area = 9223372036854775807
        self.__discard_convex = False
        self.__discard_non_convex = False
        self.__min_roundness = 0.0
        self.__max_roundness = 1.0
        super(OpencvFindContours, self).__init__("", *args, **kwargs)
        self.on_new_contours_result.do = self.do_contours_result

    def on_new_image_listener(self, emitter):
        """Find, filter and draw the contours of *emitter*'s image."""
        try:
            self.image_source = emitter
            if emitter.img is None:
                return
            _retrieval_mode = self.contour_retrieval_mode[self.retrieval_mode] if isinstance(self.retrieval_mode, str) else self.retrieval_mode
            _approximation_method = self.contour_approximation_method[self.approximation_method] if isinstance(self.approximation_method, str) else self.approximation_method
            img = emitter.img.copy()
            # OpenCV 3 returns (img, contours, hierarchy); OpenCV 4 drops img.
            major = cv2.__version__.split('.')[0]
            if major == '3':
                img, self.contours, self.hierarchy = cv2.findContours(img, _retrieval_mode, _approximation_method)
            else:
                self.contours, self.hierarchy = cv2.findContours(img, _retrieval_mode, _approximation_method)
            filtered_contours_indices = []
            for ic, c in enumerate(self.contours):
                convex = cv2.isContourConvex(c)  # hoisted: was computed twice per contour
                if self.__discard_convex and convex:
                    continue
                if self.__discard_non_convex and not convex:
                    continue
                l = cv2.arcLength(c, True)
                if not (self.__min_arc_length < l < self.__max_arc_length):
                    continue
                area = cv2.contourArea(c)
                if not (self.__min_contour_area < area < self.__max_contour_area):
                    continue
                #https://answers.opencv.org/question/21101/circularity-of-a-connected-component/
                roundness = (4.0*area) / (math.pi* (l/math.pi)**2) #4 Area / (pi Max-diam^2)
                if self.__min_roundness < roundness < self.__max_roundness:
                    filtered_contours_indices.append(ic)
            #drawing selected contours
            img.fill(255)
            for i in filtered_contours_indices:
                img = cv2.drawContours(img, self.contours, i, 0, 1, cv2.LINE_AA)
            self.set_image_data(img)
            self.on_new_contours_result()
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())

    def do_contours_result(self, callback, *userdata, **kwuserdata):
        #this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
        if hasattr(self.on_new_contours_result.event_method_bound, '_js_code'):
            self.on_new_contours_result.event_source_instance.attributes[self.on_new_contours_result.event_name] = self.on_new_contours_result.event_method_bound._js_code%{
                'emitter_identifier':self.on_new_contours_result.event_source_instance.identifier, 'event_name':self.on_new_contours_result.event_name}
        self.on_new_contours_result.callback = callback
        self.on_new_contours_result.userdata = userdata
        self.on_new_contours_result.kwuserdata = kwuserdata
        #here the callback is called immediately to make it possible link to the plc
        if callback is not None: #protection against the callback replacements in the editor
            callback(self, self.contours, self.hierarchy, *userdata, **kwuserdata)

    @gui.decorate_set_on_listener("(self, emitter, contours, hierarchy)")
    @gui.decorate_event
    def on_new_contours_result(self):
        """Event carrying the latest (contours, hierarchy) pair."""
        return (self.contours, self.hierarchy)
class OpencvInRangeGrayscale(OpencvImage):
    """ OpencvInRangeGrayscale thresholding widget.
        Binarizes the image received on on_new_image_listener, keeping the
        pixels whose value lies within [threshold1, threshold2] (cv2.inRange).
        The event on_new_image can be connected to other Opencv widgets for further processing
    """
    icon = default_icon("InRange")

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter threshold1', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def threshold1(self):
        # Lower bound passed to cv2.inRange.
        return self.__threshold1
    @threshold1.setter
    def threshold1(self, v):
        self.__threshold1 = v
        self.on_new_image_listener(self.image_source)

    @property
    @gui.editor_attribute_decorator('WidgetSpecific','The filter threshold2', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 1, 'step': 1})
    def threshold2(self):
        # Upper bound passed to cv2.inRange.
        return self.__threshold2
    @threshold2.setter
    def threshold2(self, v):
        self.__threshold2 = v
        self.on_new_image_listener(self.image_source)

    def __init__(self, threshold1=80, threshold2=160, *args, **kwargs):
        self.__threshold1 = threshold1
        self.__threshold2 = threshold2
        super(OpencvInRangeGrayscale, self).__init__("", *args, **kwargs)

    def on_new_image_listener(self, emitter):
        """Threshold *emitter*'s image with cv2.inRange and publish the mask."""
        try:
            self.image_source = emitter
            self.set_image_data(cv2.inRange(emitter.img, self.threshold1, self.threshold2))
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())
|
index.py | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 gomashio1596
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
try:
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
import datetime
from functools import partial, wraps
from glob import glob
import json
import logging
import os
import platform
import random
import re
import string
import socket
import sys
from threading import Thread, Timer
import time
import traceback
from typing import Optional, Union, Type, Any, List, Callable
import unicodedata
import webbrowser
except ModuleNotFoundError as e:
import traceback
print(traceback.format_exc())
import platform
print(f'Python {platform.python_version()}\n')
print('標準ライブラリの読み込みに失敗しました。Pythonのバージョンが間違っている可能性があります。Pythonの再インストールなどを試してみてください。問題が修正されない場合は\nTwitter @gomashio1596\nDiscord gomashio#4335\nこちらか\nhttps://discord.gg/NEnka5N\nDiscordのサーバーまでお願いします')
print('Failed to load basic library. Python version maybe wrong. Try reinstall Python. If the issue is not resolved, contact me\nTwitter @gomashio1596\nDiscord gomashio#4335\nor please join support Discord server\nhttps://discord.gg/NEnka5N')
sys.exit(1)
try:
import aiohttp
from crayons import cyan, green, magenta, red, yellow
import discord
import fortnitepy
from fortnitepy import ClientPartyMember
import jaconv
from jinja2 import Environment, FileSystemLoader
from sanic.request import Request
from sanic import Sanic
import sanic.exceptions
import sanic.response
import requests
except ModuleNotFoundError as e:
print(traceback.format_exc())
print(f'Python {platform.python_version()}\n')
print('サードパーティーライブラリの読み込みに失敗しました。INSTALL.bat を実行してください。問題が修正されない場合は\nTwitter @gomashio1596\nDiscord gomashio#4335\nこちらか\nhttps://discord.gg/NEnka5N\nDiscordのサーバーまでお願いします')
print('Failed to load third party library. Please run INSTALL.bat. If the issue is not resolved, contact me\nTwitter @gomashio1596\nDiscord gomashio#4335\nor please join support Discord server\nhttps://discord.gg/NEnka5N')
sys.exit(1)
# Event-loop selection: on Windows use the Proactor loop; elsewhere prefer
# uvloop when it happens to be installed, falling back silently otherwise.
if sys.platform == 'win32':
    asyncio.set_event_loop(asyncio.ProactorEventLoop())
else:
    try:
        import uvloop
    except ModuleNotFoundError:
        # uvloop is optional — the default loop policy is used instead.
        pass
    else:
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
if True: #Classes
class bool_:
    """Parse a user-supplied string into a bool.

    "true" (any casing) yields True; every other value — including
    "false" and unrecognized strings — yields False.
    """
    @classmethod
    def create(cls, content: str) -> bool:
        return content.lower() == "true"
class bool_none:
    """Like bool_, but additionally maps "none" (any casing) to None.

    Unrecognized strings yield False.
    """
    @classmethod
    def create(cls, content: str) -> bool:
        lowered = content.lower()
        mapping = {"true": True, "false": False, "none": None}
        if lowered in mapping:
            return mapping[lowered]
        return False
class select:
    """Marker wrapper for a config value chosen from a fixed set of options."""
    def __init__(self, content: List[dict]) -> None:
        # presumably a list of option dicts rendered as a dropdown by the
        # config UI — confirm against the consumer of `select`.
        self.content = content
class Red:
    # Marker class — presumably flags a config entry for highlighted (red)
    # rendering; confirm at the usage site.
    pass
class FixRequired:
    # Marker class — presumably flags a config entry that must be corrected
    # before the bot can run; confirm at the usage site.
    pass
class LoginManager:
    """Minimal cookie-session manager for the web UI.

    Sessions are random ids kept in ``self.expires`` mapped to their expiry
    time (naive UTC). When ``data["web"]["login_required"]`` is false, every
    request counts as authenticated.
    """
    def __init__(self) -> None:
        self.id_len = 64
        self.expire_time = datetime.timedelta(minutes=10)
        self.expires = {}  # session id -> expiry datetime (naive UTC)
        self.cookie_key = "X-SessionId"
        self.no_auth_handler_ = sanic.response.html("Unauthorized")

    def generate_id(self, request: Request) -> str:
        """Return a fresh session id not currently in use."""
        alphabet = string.ascii_letters + string.digits
        Id = "".join(random.choices(alphabet, k=self.id_len))
        while Id in self.expires.keys():
            Id = "".join(random.choices(alphabet, k=self.id_len))
        return Id

    def authenticated(self, request: Request) -> bool:
        """True when the request carries a known, unexpired session cookie."""
        if data["web"]["login_required"]:
            Id = request.cookies.get(self.cookie_key)
            if not Id or Id not in self.expires.keys():
                return False
            # BUGFIX: expiry timestamps were stored but never checked, so a
            # session lived forever. Reject (and drop) expired sessions.
            if self.expires[Id] < datetime.datetime.utcnow():
                del self.expires[Id]
                return False
            return True
        else:
            return True

    def login_user(self, request: Request, response: Type[sanic.response.BaseHTTPResponse]) -> None:
        """Create a session for this response and record its expiry."""
        Id = self.generate_id(request)
        response.cookies[self.cookie_key] = Id
        self.expires[Id] = datetime.datetime.utcnow() + self.expire_time

    def logout_user(self, request: Request, response: Type[sanic.response.BaseHTTPResponse]) -> None:
        """Drop the request's session and clear its cookie."""
        Id = request.cookies.get(self.cookie_key)
        if Id:
            del response.cookies[self.cookie_key]
            # BUGFIX: the session was previously *renewed* here (expiry reset
            # to now + expire_time), keeping it valid after logout.
            self.expires.pop(Id, None)

    def login_required(self, func: Callable):
        """Decorator: run *func* for authenticated requests, else the no-auth handler."""
        @wraps(func)
        def deco(*args: Any, **kwargs: Any):
            request = args[0]
            if self.authenticated(request):
                return func(*args, **kwargs)
            elif isinstance(self.no_auth_handler_, sanic.response.BaseHTTPResponse):
                return self.no_auth_handler_
            elif callable(self.no_auth_handler_):
                return self.no_auth_handler_(*args, **kwargs)
        return deco

    def no_auth_handler(self, func: Callable):
        """Register a coroutine *func* to serve unauthenticated requests."""
        if asyncio.iscoroutinefunction(func) is False:
            raise ValueError("Function must be a coroutine")
        self.no_auth_handler_ = func
        @wraps(func)
        def deco(*args: Any, **kwargs: Any):
            return func(*args, **kwargs)
        return deco
class WebUser:
    """Stand-in "user" for commands originating from the web UI."""
    def __init__(self, sessionId: str) -> None:
        self._id = sessionId
    @property
    def display_name(self) -> str:  # annotation fixed: returns a constant str
        return "WebUser"
    @property
    def id(self) -> str:  # annotation fixed: returns the session id string
        return self._id
class WebMessage:
    """A command message received from the web UI, mimicking a chat message."""
    def __init__(self, content: str, sessionId: str, client: fortnitepy.Client) -> None:
        self._sessionId = sessionId
        self._content = content
        self._client = client
        self._author = WebUser(self._sessionId)
        self._messages = []  # replies accumulated via reply()
    @property
    def author(self) -> WebUser:
        return self._author
    @property
    def content(self) -> str:
        return self._content
    @property
    def client(self) -> Type[fortnitepy.Client]:
        return self._client
    @property
    def result(self) -> List[str]:  # annotation fixed: returns the reply list
        return self._messages
    def reply(self, content: str) -> None:
        # Replies are collected here and later returned to the web client.
        self._messages.append(content)
class AllMessage:
    """Wraps a message broadcast to all clients; collects replies per client."""
    def __init__(self,
                 content: str,
                 author: Union[fortnitepy.user.UserBase, discord.abc.User, WebUser],
                 client: fortnitepy.Client,
                 base: Union[fortnitepy.message.MessageBase, discord.Message, WebMessage]
                 ) -> None:
        self._content = content
        self._author = author
        self._client = client
        self._base = base
        self._messages = {}  # client user id -> list of reply strings
    @property
    def author(self) -> Union[fortnitepy.user.UserBase, discord.abc.User, WebUser]:  # annotation fixed
        return self._author
    @property
    def content(self) -> str:
        return self._content
    @property
    def client(self) -> fortnitepy.Client:
        return self._client
    @property
    def base(self) -> Union[fortnitepy.message.MessageBase, discord.Message, WebMessage]:
        return self._base
    @property
    def result(self) -> dict:  # annotation fixed: {client user id: [replies]}
        return self._messages
    def reply(self, content: str, client: fortnitepy.Client) -> None:
        # Group replies under the id of the client that produced them.
        if not self._messages.get(client.user.id):
            self._messages[client.user.id] = []
        self._messages[client.user.id].append(content)
class CanBeMultiple:
    # Marker class — presumably flags a config entry that accepts multiple
    # values; confirm at the usage site.
    pass
class Client(fortnitepy.Client):
def __init__(self, **kwargs: Any) -> None:
    """Fortnite bot client; mirrors the module-level ``data`` config and the
    module-level ``email`` of the account being started into attributes.
    """
    self.email = email
    self.status_ = data['fortnite']['status']
    self.eid = data['fortnite']['eid']
    # lifecycle flags
    self.isready = False
    self.booting = False
    self.timer = None  # threading.Timer that re-enables invite handling (see invitation_accept)
    self.acceptinvite_interval = True
    self.stopcheck = False
    # cosmetic locks — checked via lock_check(); semantics of each lock are
    # defined by the cosmetics handlers (not visible in this chunk)
    self.outfitlock = False
    self.backpacklock = False
    self.pickaxelock = False
    self.emotelock = False
    self.owner = None
    self.prevmessage = {}
    self.select = {}
    self.invitelist = []
    # per-channel enable flags from the config
    self.whisper = data['fortnite']['whisper']
    self.partychat = data['fortnite']['partychat']
    self.discord = data['discord']['discord']
    self.web = data['web']['web']
    self.whisperperfect = data['fortnite']['disablewhisperperfectly']
    self.partychatperfect = data['fortnite']['disablepartychatperfectly']
    self.discordperfect = data['discord']['disablediscordperfectly']
    self.joinmessageenable = data['fortnite']['joinmessageenable']
    self.randommessageenable = data['fortnite']['randommessageenable']
    # mimic targets — presumably the member whose cosmetics are copied;
    # confirm against the mimic handlers elsewhere in the file
    self.outfitmimic = data['fortnite']['outfitmimic']
    self.backpackmimic = data['fortnite']['backpackmimic']
    self.pickaxemimic = data['fortnite']['pickaxemimic']
    self.emotemimic = data['fortnite']['emotemimic']
    self.acceptinvite = data['fortnite']['acceptinvite']
    self.acceptfriend = data['fortnite']['acceptfriend']
    super().__init__(**kwargs)
def add_cache(self, user: fortnitepy.user.UserBase) -> None:
    """Best-effort: record *user* in the global display-name -> user cache."""
    try:
        if not (isinstance(user, fortnitepy.user.UserBase) and user.id):
            return
        if not isinstance(user, fortnitepy.User):
            # Partial/other user objects are resolved to a full User first.
            user = self.get_user(user.id)
        if user and user.display_name:
            cache_users[user.display_name] = user
    except Exception:
        send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
def inviteaccept(self) -> None:
    """Re-enable invitation accepting (announces it to the console first)."""
    send(name(self.user),l("inviteaccept"),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    self.acceptinvite = True
def inviteinterval(self) -> None:
    """Timer callback: lift the invite-interval cooldown."""
    send(name(self.user),l("inviteinterval"),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    self.acceptinvite_interval = True
def lock_check(self, author_id: str) -> bool:
    """Return True when the lock applies to *author_id*.

    The bot's owner, the Discord owner, and (when the respective
    "whitelist-ignorelock" option is on) whitelisted users bypass locks.
    """
    exempt = (
        getattr(self.owner, "id", None) == author_id
        or (data['fortnite']['whitelist-ignorelock'] and author_id in whitelist)
        or getattr(dclient.owner, "id", None) == author_id
        or (data['discord']['whitelist-ignorelock'] and author_id in whitelist_)
    )
    return not exempt
def is_most(self) -> Optional[str]:
    """If this client joined the party earliest among the loaded bot clients,
    return a "name1/name2/..." string of all bot members; otherwise None.
    """
    name = self.user.display_name
    # (user id, joined_at) of the earliest-joined bot member seen so far;
    # seeded with this client's own join time.
    member_joined_at_most = [self.user.id, getattr(getattr(self.party,"me",None),"joined_at",datetime.datetime.now())]
    for member_ in self.party.members.copy().values():
        self.add_cache(member_)
        # Only consider members that are one of our loaded bot accounts.
        if member_.id in [i.user.id for i in loadedclients]:
            if member_.id != self.user.id:
                name += f"/{str(member_.display_name)}"
            if member_.joined_at < member_joined_at_most[1]:
                member_joined_at_most = [member_.id, getattr(member_, "joined_at", datetime.datetime.now())]
    if self.user.id == member_joined_at_most[0]:
        return name
    return None
async def status_loop(self) -> None:
    """Background task: re-format and push the presence status every 30 s."""
    while True:
        try:
            """party = getattr(self,"party",None)
            if party:
            config = party.config
            party_id = party.id
            party_size = party.member_count
            party_max_size = config["max_size"]
            else:
            party_id = None
            party_size = None
            party_max_size = None
            var = {
            "friend_count": len(self.friends),
            "pending_count": len(self.pending_friends),
            "block_count": len(self.blocked_users),
            "display_name": self.user.display_name,
            "id": self.user.id,
            "party_id": party_id,
            "party_size": party_size,
            "party_max_size": party_max_size
            }"""
            # NOTE(review): globals() returns the live module dict, so the
            # update below injects/overwrites a module-level name "client"
            # on every pass — confirm this side effect is intentional.
            var = globals()
            var.update({"client": self})
            # The status template from the config is format()-ed against the
            # module globals plus this client.
            await self.set_status(self.status_.format(**var))
        except Exception:
            send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        await asyncio.sleep(30)
async def invitation_accept(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
    """Accept a party invitation, reporting failures to the sender/console.

    As in the original flow, the invite-interval timer is re-armed and the
    accept is logged even after a handled failure.
    """
    try:
        await invitation.accept()
    except fortnitepy.PartyError:
        if data['ingame-error']:
            await invitation.sender.send(l("error_already_member_of_party"))
        if data['loglevel'] == 'debug':
            send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(name(self.user),l("already_member_of_party"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
    except fortnitepy.Forbidden:
        # Checked before HTTPException in case Forbidden is a subclass of it
        # in this fortnitepy version — otherwise the branch could never fire.
        if data['ingame-error']:
            await invitation.sender.send(l("error_private_party"))
        if data['loglevel'] == 'debug':
            send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(self.user.display_name,l("error_private_party"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
    except fortnitepy.HTTPException:
        # BUGFIX: there were two identical `except fortnitepy.HTTPException`
        # clauses; the second one ("error_while_accepting_partyinvite") was
        # unreachable. A single handler remains.
        if data['ingame-error']:
            await invitation.sender.send(l("user_notfound"))
        if data['loglevel'] == 'debug':
            send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(self.user.display_name,l("user_notfound"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
    except Exception:
        if data['ingame-error']:
            await invitation.sender.send(l("error"))
        send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    if data['fortnite']['inviteinterval']:
        try:
            self.timer.cancel()
        except Exception:
            pass
        self.acceptinvite_interval = False
        # BUGFIX: Timer previously received args=(self,), which would have
        # invoked the *bound* method as self.inviteinterval(self) and raised
        # TypeError inside the timer thread.
        self.timer = Timer(data['fortnite']['interval'], self.inviteinterval)
        self.timer.start()
    if data['loglevel'] == 'normal':
        send(name(self.user),l("accepted_invite_from", name(invitation.sender)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    else:
        send(name(self.user),f'{l("accepted_invite_from2", f"{name(invitation.sender)} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id)}',add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def invitation_decline(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
    """Decline a party invitation, logging the decline and any errors."""
    if data['loglevel'] == 'normal':
        send(self.user.display_name,l("declined_invite_from", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    else:
        send(self.user.display_name,l("declined_invite_from2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    try:
        await invitation.decline()
    except fortnitepy.PartyError:
        if data['ingame-error']:
            await invitation.sender.send(l("error_netcl_does_not_match"))
        if data['loglevel'] == 'debug':
            # BUGFIX: these handlers logged via the module-level `client`
            # instead of this instance; use self like the rest of the class.
            send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(self.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
    except fortnitepy.HTTPException:
        if data['ingame-error']:
            await invitation.sender.send(l("error_while_declining_invite"))
        if data['loglevel'] == 'debug':
            send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(self.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
    except Exception:
        if data['ingame-error']:
            await invitation.sender.send(l("error"))
        send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_interval(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_interval3", str(data["fortnite"]["interval"])))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_interval", str(invitation.sender.display_name), str(data["fortnite"]["interval"])),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_interval2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id, str(data["fortnite"]["interval"])),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_owner(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_owner3"))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_owner", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_owner2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_whitelist(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_whitelist3"))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_whitelist", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_whitelist2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(client.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(client.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def change_asset(self, author_id: str, type_: str, id_: str, variants: Optional[dict] = {}, enlightenment: Optional[Union[tuple, list]] = []) -> None:
if not enlightenment:
enlightenment = None
if type_ == "Outfit":
if self.outfitlock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants_ = self.party.me.create_variants(item="AthenaCharacter", profile_banner='ProfileBanner')
variants += variants_
await self.party.me.edit_and_keep(partial(self.party.me.set_outfit, asset=id_, variants=variants, enlightenment=enlightenment))
try:
if data['fortnite']['avatar_id'] == "{bot}":
self.set_avatar(fortnitepy.Avatar(asset=self.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif type_ == "Back Bling":
if self.backpacklock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants_ = self.party.me.create_variants(item="AthenaBackpack", profile_banner='ProfileBanner')
variants += variants_
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, asset=id_, variants=variants, enlightenment=enlightenment))
elif type_ == "Pet":
if self.backpacklock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants_ = self.party.me.create_variants(item="AthenaBackpack", profile_banner='ProfileBanner')
variants += variants_
await self.party.me.edit_and_keep(partial(self.party.me.set_pet, asset=id_, variants=variants))
elif type_ == "Harvesting Tool":
if self.pickaxelock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants_ = self.party.me.create_variants(item="AthenaPickaxe", profile_banner='ProfileBanner')
variants += variants_
await self.party.me.edit_and_keep(partial(self.party.me.set_pickaxe, asset=id_, variants=variants))
await self.party.me.set_emote("EID_IceKing")
elif type_ == "Emote":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
await self.party.me.set_emote(asset=id_)
self.eid = id_
elif type_ == "Emoticon":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
id_ = f"/Game/Athena/Items/Cosmetics/Dances/Emoji/{id_}.{id_}"
await self.party.me.set_emote(asset=id_)
self.eid = id_
elif type_ == "Toy":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
id_ = f"/Game/Athena/Items/Cosmetics/Toys/{id_}.{id_}"
await self.party.me.set_emote(asset=id_)
self.eid = id_
return True
async def hide(self, member_id: Optional[str] = None) -> None:
if not self.party.me.leader:
raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
if not member_id:
squad_assignments = [{"memberId": self.user.id, "absoluteMemberIdx": 0}]
num = 1
if self.owner and data['fortnite']['show-owner'] and self.party.members.get(self.owner.id):
squad_assignments.append({"memberId": self.owner.id, "absoluteMemberIdx": num})
num += 1
if data['fortnite']['show-whitelist']:
for whitelistuser in whitelist:
if self.party.members.get(whitelistuser):
squad_assignments.append({"memberId": whitelistuser, "absoluteMemberIdx": num})
num += 1
if data['fortnite']['show-bot']:
for botuser in (otherbotlist + [i.user.id for i in loadedclients]):
if self.party.members.get(botuser):
squad_assignments.append({"memberId": botuser, "absoluteMemberIdx": num})
num += 1
else:
member = self.party.members.get(member_id)
if not member:
raise fortnitepy.NotFound("This member is not a part of this party.")
squad_assignments = getattr(self,"visual_members",self.party.meta.squad_assignments)
[squad_assignments.remove(i) for i in squad_assignments if i["memberId"] == member.id]
self.visual_members = [{"memberId": i["memberId"], "absoluteMemberIdx": i["absoluteMemberIdx"]} for i in squad_assignments]
prop = self.party.meta.set_squad_assignments(squad_assignments)
await self.party.patch(updated=prop)
    async def show(self, member_id: Optional[str] = None) -> None:
        """Re-add hidden members to the visual squad layout.

        With no ``member_id``, every party member missing from the current
        assignments is appended to a free slot; otherwise only that member
        is restored.

        Raises:
            fortnitepy.Forbidden: if this client is not the party leader.
            fortnitepy.NotFound: if ``member_id`` is not in the party.
        """
        if not self.party.me.leader:
            raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
        # Prefer the locally tracked layout; fall back to the party meta.
        squad_assignments = getattr(self,"visual_members",self.party.meta.squad_assignments)
        squad_members = [member["memberId"] for member in squad_assignments]
        member_indexes = [member["absoluteMemberIdx"] for member in squad_assignments]
        # Slots 0-14 are assumed to be the visible squad positions; pick the
        # ones not already taken.
        available_indexes = [num for num in range(15) if num not in member_indexes]
        if not member_id:
            # NOTE(review): iterating self.party.members appears to yield
            # member ids (it is used with .get()/.keys() elsewhere in this
            # class) — confirm, since `i not in squad_members` only works
            # when `i` is an id string.
            squad_assignments.extend([{"memberId": member_id, "absoluteMemberIdx": available_indexes[num]} for num,member_id in enumerate([i for i in self.party.members if i not in squad_members])])
        else:
            member = self.party.members.get(member_id)
            if not member:
                raise fortnitepy.NotFound("This member is not a part of this party.")
            if member.id not in squad_members:
                squad_assignments.append({"memberId": member.id, "absoluteMemberIdx": available_indexes[0]})
        # Keep only the two keys the service expects.
        self.visual_members = [{"memberId": i["memberId"], "absoluteMemberIdx": i["absoluteMemberIdx"]} for i in squad_assignments]
        prop = self.party.meta.set_squad_assignments(squad_assignments)
        await self.party.patch(updated=prop)
async def party_member_outfit_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
flag = False
if isinstance(self.outfitmimic,bool) and self.outfitmimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.outfitmimic,str) and member.id == self.outfitmimic:
flag = True
display_name_ = self.is_most()
if display_name_ and not member_asset(member,"outfit"):
send(display_name_,f"ID: {member_asset(member,'outfit')}")
if flag:
if not member_asset(member,"outfit"):
try:
await self.change_asset(self.user.id, "Outfit", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
await self.change_asset(self.user.id, "Outfit", member_asset(member,"outfit"), member.outfit_variants, member.enlightenments)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_backpack_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
flag = False
if isinstance(self.backpackmimic,bool) and self.backpackmimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.backpackmimic,str) and member.id == self.backpackmimic:
flag = True
display_name_ = self.is_most()
if display_name_ and not member_asset(member,"backpack"):
send(display_name_,f"ID: {member_asset(member,'backpack')}")
if flag:
if not member_asset(member,"backpack"):
try:
await self.change_asset(self.user.id, "Back Bling", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
type_ = convert_to_type(member_asset(member,'backpack'))
await self.change_asset(self.user.id, type_, member_asset(member,"backpack"), member.backpack_variants, member.enlightenments)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_pickaxe_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
flag = False
if isinstance(self.pickaxemimic,bool) and self.pickaxemimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.pickaxemimic,str) and member.id == self.pickaxemimic:
flag = True
display_name_ = self.is_most()
if display_name_ and not member_asset(member,"pickaxe"):
send(display_name_,f"ID: {member_asset(member,'pickaxe')}")
if flag:
if not member_asset(member,"pickaxe"):
try:
await self.change_asset(self.user.id, "Harvesting Tool", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
await self.change_asset(self.user.id, "Harvesting Tool", member_asset(member,"pickaxe"), member.pickaxe_variants)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_emote_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
flag = False
if isinstance(self.emotemimic,bool) and self.emotemimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.emotemimic,str) and member.id == self.emotemimic:
flag = True
display_name_ = self.is_most()
if display_name_ and not member_asset(member,"emote"):
send(display_name_,f"ID: {member_asset(member,'emote')}")
if flag:
if not member_asset(member,"emote"):
try:
await self.change_asset(self.user.id, "Emote", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
type_ = convert_to_type(member_asset(member,"emote"))
await self.change_asset(self.user.id, type_, member_asset(member,"emote"))
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    # Events: fortnitepy callback handlers for this client
    async def event_device_auth_generate(self, details: dict, email: str) -> None:
        # Persist freshly generated device-auth credentials for this account
        # so later logins can reuse them instead of re-authenticating.
        store_device_auth_details(email, details)
async def event_ready(self) -> None:
global first_boot
loop = asyncio.get_event_loop()
display_name = name(self.user)
send(display_name,f'{l("login")}: {display_name}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
self.isready = True
self.booting = False
loadedclients.append(self)
client_name[self.user.display_name] = self
self.add_cache(self.user)
for user in [list(self.friends.values()) + list(self.pending_friends.values()) + list(self.blocked_users.values())]:
self.add_cache(user)
loop.create_task(self.status_loop())
try:
if data['fortnite']['avatar_id'] == "{bot}":
self.set_avatar(fortnitepy.Avatar(asset=self.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
else:
self.set_avatar(fortnitepy.Avatar(asset=data['fortnite']['avatar_id'].format(bot=self.party.me.outfit), background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await client.party.set_privacy(data["fortnite"]["privacy"])
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
owner = None
try:
owner = await self.fetch_profile(data['fortnite']['owner'])
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not owner:
send(display_name,l("owner_notfound"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(owner)
friend = self.get_friend(owner.id)
if not friend:
if data['fortnite']['addfriend']:
send(display_name,l("not_friend_with_owner",commands["reload"]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
try:
await self.add_friend(owner.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
self.owner = friend
send(display_name,f'{l("owner")}: {name(self.owner)}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
if self.owner:
await self.owner.send(l("click_invite"))
async def _(listuser: str) -> None:
try:
user = await self.fetch_profile(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("invitelist_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
friend = self.get_friend(user.id)
if friend is None and user.id != self.user.id:
if data['fortnite']['addfriend']:
try:
await self.add_friend(friend.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
send(display_name,l("not_friend_with_inviteuser",listuser,commands["reload"]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(user)
self.invitelist.append(user.id)
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite']['invitelist']])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f'invitelist {self.invitelist}',yellow,add_d=lambda x:f'```\n{x}\n```')
if first_boot:
first_boot = False
lists = {
"blacklist": "blacklist",
"whitelist": "whitelist",
"otherbotlist": "botlist"
}
async def _(listuser: str) -> None:
try:
user = await self.fetch_profile(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
self.add_cache(user)
if not user:
send(display_name,l(f"{data_}_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
if data_ == "blacklist" and data["fortnite"]["blacklist-autoblock"]:
try:
await user.block()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
globals()[list_].append(user.id)
for list_,data_ in lists.items():
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite'][list_]])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f"fortnite {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
lists = [
"outfitmimic",
"backpackmimic",
"pickaxemimic",
"emotemimic"
]
async def _(mimic: str) -> None:
if isinstance(data['fortnite'][mimic],str):
try:
user = await self.fetch_profile(data['fortnite'][mimic])
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l(f"{mimic}_user_notfound",data['fortnite'][mimic]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
setattr(self,mimic,user.id)
if data['loglevel'] == "debug":
send(display_name,f"{mimic} {getattr(self,mimic)}",yellow,add_d=lambda x:f'```\n{x}\n```')
try:
await asyncio.gather(*[_(mimic) for mimic in lists])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['fortnite']['acceptfriend']:
pendings = [i for i in self.pending_friends.values() if i.incoming]
for pending in pendings:
if self.acceptfriend is True:
try:
await pending.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif self.acceptfriend is False:
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['discord']['enabled']:
try:
await dclient.start(data['discord']['token'])
except Exception:
data["discord"]["enabled"] = False
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_close(self) -> None:
self.isready = False
send(name(self.user),f'{l("closing")}: {self.user.display_name}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
    async def event_restart(self) -> None:
        # Announce that this client re-logged (fortnitepy restart event).
        send(name(self.user),l("relogin", self.user.display_name),green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_party_invite(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
if not self.isready or not invitation:
return
display_name = name(self.user)
self.add_cache(invitation.sender)
if invitation.sender.id in blacklist and data['fortnite']['blacklist-declineinvite']:
return
if invitation.sender.id == getattr(self.owner,"id",None):
await self.invitation_accept(invitation)
return
if invitation.sender.id in whitelist and data['fortnite']['whitelist-allowinvite']:
await self.invitation_accept(invitation)
return
if data['loglevel'] == 'normal':
send(display_name,l("invite_from",name(invitation.sender)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("invite_from2",f'{name(invitation.sender)} [{platform_to_str(invitation.sender.platform)}]',invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
if self.owner:
if self.owner.id in self.party.members.keys() and data['fortnite']['invite-ownerdecline']:
await self.invitation_decline_owner(invitation)
return
if True in [memberid in whitelist for memberid in self.party.members.keys()] and data['fortnite']['whitelist-declineinvite']:
await self.invitation_decline_whitelist(invitation)
elif not self.acceptinvite:
await self.invitation_decline(invitation)
elif not self.acceptinvite_interval:
await self.invitation_decline_interval(invitation)
else:
await self.invitation_accept(invitation)
async def event_friend_request(self, request: Union[fortnitepy.IncomingPendingFriend, fortnitepy.OutgoingPendingFriend]) -> None:
if not self.isready or not request:
return
display_name = name(self.user)
self.add_cache(request)
if request.outgoing:
send(display_name,l("friend_request_to",name(request)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
return
send(display_name,l("friend_request_from",name(request)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
if self.acceptfriend is True:
try:
await request.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_accepting_friendrequest"),red,add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif self.acceptfriend is False:
try:
await request.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_declining_friendrequest"),red,add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("friend_request_decline",name(request)),red,add_d=lambda x:f'>>> {x}')
async def event_friend_add(self, friend: fortnitepy.Friend) -> None:
if not self.isready or not friend:
return
display_name = name(self.user)
self.add_cache(friend)
if friend.outgoing:
send(display_name,l("friend_accept",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("friend_add",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_friend_remove(self, friend: fortnitepy.Friend) -> None:
if not self.isready or not friend:
return
display_name = name(self.user)
self.add_cache(friend)
if data['loglevel'] == 'normal':
send(display_name,l("friend_remove",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("friend_remove",f'{name(friend)} [{platform_to_str(friend.platform)}]'),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
if member.id == self.user.id:
self.visual_members = self.party.meta.squad_assignments
if self.party.leader.id == self.user.id:
try:
if self.party.leader.id == self.user.id:
await asyncio.sleep(1)
prop = self.party.meta.set_squad_assignments(self.visual_members)
await self.party.patch(updated=prop)
if data['fortnite']['hide-user']:
if (not (getattr(self.owner,"id",None) == member.id and data['fortnite']['show-owner'])
and not (member.id in whitelist and data['fortnite']['show-whitelist'])
and not (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['show-bot'])
and member.id != self.user.id):
await self.hide(member.id)
elif data['fortnite']['hide-blacklist']:
if member.id in blacklist:
await self.hide(member.id)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l('party_member_joined',name(member),member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l('party_member_joined',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if member.id in blacklist and self.party.me.leader:
if data['fortnite']['blacklist-autokick']:
try:
await member.kick()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif data['fortnite']['blacklist-autochatban']:
try:
await member.chatban()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend']:
for member in member.party.members.copy().keys():
try:
if not self.has_friend(member) and member != self.user.id:
await self.add_friend(member)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.joinmessageenable:
try:
await self.party.send(data['fortnite']['joinmessage'])
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.randommessageenable:
try:
randommessage = random.choice(data['fortnite']['randommessage'].split(','))
send(display_name,f'{l("random_message")}: {randommessage}',add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
await self.party.send(randommessage)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await asyncio.sleep(0.1)
if data["fortnite"]["joinemote"]:
try:
await self.change_asset(self.user.id, "Emote", self.eid)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.party.leader.id == self.user.id:
try:
await self.party.set_playlist(data['fortnite']['playlist'])
await self.party.set_privacy(data['fortnite']['privacy'])
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_leave(self, member: fortnitepy.PartyMember) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
try:
if self.party.leader.id == self.user.id:
await asyncio.sleep(1)
prop = self.party.meta.set_squad_assignments(self.visual_members)
await self.party.patch(updated=prop)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l("party_member_left",name(member),member.party.member_count),magenta,lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_left",f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
if data['fortnite']['addfriend']:
for member in member.party.members.copy().keys():
if not self.has_friend(member) and member.id != self.user.id:
try:
await self.add_friend(member)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
continue
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_confirm(self, confirmation: fortnitepy.PartyJoinConfirmation) -> None:
if not self.isready or not confirmation:
return
self.add_cache(confirmation.user)
display_name = name(self.user)
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,l("party_member_request",name(confirmation.user)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if data['fortnite']['blacklist-autokick'] and confirmation.user.id in blacklist:
try:
await confirmation.reject()
except fortnitepy.HTTPException:
if data['loglevel'] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_declining_partyrequest"),red,add_d=lambda x:f'>>> {x}')
else:
try:
await confirmation.confirm()
except fortnitepy.HTTPException:
if data['loglevel'] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_accepting_partyrequest"),red,add_d=lambda x:f'>>> {x}')
    async def event_party_member_kick(self, member: fortnitepy.PartyMember) -> None:
        """Log that *member* was kicked and refresh the cached squad layout."""
        if not self.isready or not member:
            return
        self.add_cache(member)
        display_name_ = self.is_most()
        # Snapshot the post-kick squad slot assignments.
        self.visual_members = [{"memberId": i["memberId"], "absoluteMemberIdx": i["absoluteMemberIdx"]} for i in self.party.meta.squad_assignments]
        if display_name_:
            if data['loglevel'] == 'normal':
                send(display_name_,l("party_member_kick",name(member.party.leader),name(member),member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
            else:
                # Verbose levels decorate names with platform/input and add the party id.
                send(display_name_,l("party_member_kick",f'{name(member.party.leader)} [{platform_to_str(member.party.leader.platform)}/{member.party.leader.input}]',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
    async def event_party_member_promote(self, old_leader: fortnitepy.PartyMember, new_leader: fortnitepy.PartyMember) -> None:
        """React to a party-leader change.

        When the bot becomes leader it optionally hides itself, re-applies the
        configured playlist/privacy, and enforces the blacklist on current members.
        """
        if not self.isready or not old_leader or not new_leader:
            return
        self.add_cache(old_leader)
        self.add_cache(new_leader)
        display_name = name(self.user)
        display_name_ = self.is_most()
        try:
            if new_leader.id == self.user.id:
                if data['fortnite']['hide-user']:
                    await self.hide()
        except Exception:
            if data['loglevel'] == 'debug':
                send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        if display_name_:
            if data['loglevel'] == 'normal':
                send(display_name_,l("party_member_promote",name(old_leader),name(new_leader)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
            else:
                send(display_name_,l("party_member_promote",f'{name(old_leader)} [{platform_to_str(old_leader.platform)}/{old_leader.input}]',f'{name(new_leader)} [{platform_to_str(new_leader.platform)}/{new_leader.input}]'),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
        if new_leader.id == self.user.id:
            try:
                await self.party.set_playlist(data['fortnite']['playlist'])
                await self.party.set_privacy(data['fortnite']['privacy'])
                # Iterate a copy: kicking mutates the member mapping.
                for member in self.party.members.copy().values():
                    if member.id in blacklist:
                        if data['fortnite']['blacklist-autokick']:
                            try:
                                await member.kick()
                            except Exception:
                                if data['loglevel'] == 'debug':
                                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                        elif data['fortnite']['blacklist-autochatban']:
                            try:
                                await member.chatban()
                            except Exception:
                                if data['loglevel'] == 'debug':
                                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            except fortnitepy.Forbidden:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_playlist_change(self, party: fortnitepy.ClientParty, before: tuple, after: tuple) -> None:
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,after[0])
    async def event_party_member_update(self, member: fortnitepy.PartyMember) -> None:
        """Log member meta updates and enforce the blacklist while the bot leads."""
        if not self.isready or not member:
            return
        self.add_cache(member)
        display_name = name(self.user)
        display_name_ = self.is_most()
        if display_name_ and data['loglevel'] != 'normal':
            send(display_name_,l("party_member_update", f"{name(member)} [{platform_to_str(member.platform)}/{member.input}]"),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
        # The bot's own updates need no enforcement.
        if member.id == self.user.id:
            return
        if member.id in blacklist and self.party.me.leader:
            if data['fortnite']['blacklist-autokick']:
                try:
                    await member.kick()
                except Exception:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            elif data['fortnite']['blacklist-autochatban']:
                try:
                    await member.chatban()
                except Exception:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_outfit_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_outfit_change(member)
async def event_party_member_backpack_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_backpack_change(member)
async def event_party_member_pet_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_backpack_change(member)
async def event_party_member_pickaxe_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_pickaxe_change(member)
async def event_party_member_emote_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_emote_change(member)
async def event_party_member_emoji_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if after:
await self.party_member_emote_change(member)
    async def event_party_member_disconnect(self, member: fortnitepy.PartyMember) -> None:
        """Log a member disconnect and, while leader, kick the lingering member."""
        if not self.isready or not member:
            return
        self.add_cache(member)
        display_name = name(self.user)
        display_name_ = self.is_most()
        if display_name_:
            if data['loglevel'] == 'normal':
                send(display_name_,l("party_member_disconnect",name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
            else:
                send(display_name_,l("party_member_disconnect",f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]'),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
        # Disconnected members stay in the party roster; remove them if we can.
        if self.party.me.leader:
            try:
                await member.kick()
            except Exception:
                if data['loglevel'] == "debug":
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_chatban(self, member: fortnitepy.PartyMember, reason: Optional[str]) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name_ = self.is_most()
if display_name_:
if data['loglevel'] == 'normal':
if not reason:
send(display_name_,l("party_member_chatban",name(member.party.leader),name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_chatban2",name(member.party.leader),name(member),reason),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
if not reason:
send(display_name_,l("party_member_chatban",name(member.party.leader),name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_chatban2",f'{name(member.party.leader)} [{platform_to_str(member.party.leader.platform)}/{member.party.leader.input}]',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',reason),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
async def event_party_update(self, party: fortnitepy.Party) -> None:
if not self.isready or not party:
return
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,l("party_update"),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
    async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None:
        """Forward incoming whispers to the shared command processor."""
        await process_command(message)
    async def event_party_message(self, message: fortnitepy.PartyMessage) -> None:
        """Forward party-chat messages to the shared command processor."""
        await process_command(message)
if True: #Functions
def now() -> str:
return datetime.datetime.now().strftime("%H:%M:%S")
def l(key: str, *args: Any, **kwargs: Any) -> Optional[str]:
text = localize.get(key)
if text:
return text.format(*args, **kwargs)
else:
return None
def name(user: Union[fortnitepy.user.UserBase, discord.user.User, WebUser]) -> str:
if data['loglevel'] == 'normal':
return user.display_name
else:
return f"{user.display_name} / {user.id}"
    def render_template(file_: str, **kwargs: Any) -> sanic.response.HTTPResponse:
        """Render the Jinja2 template *file_* with *kwargs* and wrap it in a sanic HTML response."""
        template = env.get_template(file_)
        return sanic.response.html(template.render(**kwargs))
def dprint() -> None:
text_max = 1990
while True:
if data['discord-log']:
if data['skip-if-overflow'] and len(storedlogs) >= 50:
storedlogs.clear()
for num,log in enumerate(storedlogs):
try:
username = list(log.keys())[0]
content = list(log.values())[0]
if len(content) > text_max:
if data["omit-over2000"]:
text = content[:text_max] + "..."
res = requests.post(
data['webhook'],
json={
'username': username,
'content': text
}
)
else:
text = [content[i:i+text_max] for i in range(0, len(content), text_max)]
for text_ in text:
res = requests.post(
data['webhook'],
json={
'username': username,
'content': text_
}
)
if res.status_code == 429:
break
else:
continue
break
else:
res = requests.post(
data['webhook'],
json={
'username': username,
'content': content
}
)
if res.status_code == 204:
storedlogs.pop(num)
if res.status_code == 429:
break
except TypeError:
if data['loglevel'] =='debug':
print(red(traceback.format_exc()))
try:
storedlogs.pop(num)
except Exception:
pass
continue
except Exception:
print(red(traceback.format_exc()))
print(red(f"{username}: {content} の送信中にエラーが発生しました"))
continue
time.sleep(5)
def dstore(username: str, content: Any) -> None:
if data['discord-log']:
if data['hide-email']:
for email in data['fortnite']['email'].split(','):
content = content.replace(email,len(email)*"X")
if data['hide-token']:
for token in data['discord']['token'].split(','):
content = content.replace(token,len(token)*"X")
if data['hide-webhook']:
for webhook in data['webhook'].split(','):
content = content.replace(webhook,len(webhook)*"X")
if len(storedlogs) > 0:
if list(storedlogs[len(storedlogs)-1].keys())[0] == username:
storedlogs[len(storedlogs)-1][username] += f'\n{content}'
else:
storedlogs.append({username: content})
else:
storedlogs.append({username: content})
def send(user_name: str, content: Any, color: Optional[Callable] = None, add_p: Optional[Callable] = None, add_d: Optional[Callable] = None) -> Optional[str]:
content = str(content)
if not data['no-logs'] or color is red:
if not color:
if not add_p:
print(content)
else:
print(add_p(content))
else:
if not add_p:
print(color(content))
else:
print(color(add_p(content)))
content = discord.utils.escape_markdown(content)
if not add_d:
dstore(user_name,content)
else:
dstore(user_name,add_d(content))
def platform_to_str(platform: fortnitepy.Platform) -> Optional[str]:
converter = {
fortnitepy.Platform.WINDOWS: "Windows",
fortnitepy.Platform.MAC: "Mac",
fortnitepy.Platform.PLAYSTATION: "PlayStation",
fortnitepy.Platform.XBOX: "Xbox",
fortnitepy.Platform.SWITCH: "Switch",
fortnitepy.Platform.IOS: "IOS",
fortnitepy.Platform.ANDROID: "Android"
}
return converter.get(platform)
def convert_to_type(text: str) -> Optional[str]:
if True in [text.lower() in commands[key].split(",") for key in outfit_keys] or text.lower().startswith("cid_"):
return "Outfit"
elif True in [text.lower() in commands[key].split(",") for key in backpack_keys] or text.lower().startswith("bid_"):
return "Back Bling"
elif True in [text.lower() in commands[key].split(",") for key in pet_keys] or text.lower().startswith("petcarrier_"):
return "Pet"
elif True in [text.lower() in commands[key].split(",") for key in pickaxe_keys] or text.lower().startswith("pickaxe_id"):
return "Harvesting Tool"
elif True in [text.lower() in commands[key].split(",") for key in emote_keys] or text.lower().startswith("eid_"):
return "Emote"
elif True in [text.lower() in commands[key].split(",") for key in emoji_keys] or text.lower().startswith("emoji_"):
return "Emoticon"
elif True in [text.lower() in commands[key].split(",") for key in toy_keys] or text.lower().startswith("toy_"):
return "Toy"
elif True in [text.lower() in commands[key].split(",") for key in item_keys]:
return "Item"
def convert_to_asset(text: str) -> Optional[str]:
if True in [text.lower() in commands[key].split(",") for key in outfit_keys] or text.lower().startswith("cid_"):
return "outfit"
elif True in [text.lower() in commands[key].split(",") for key in backpack_keys] or text.lower().startswith("bid_"):
return "backpack"
elif True in [text.lower() in commands[key].split(",") for key in pet_keys] or text.lower().startswith("petcarrier_"):
return "backpack"
elif True in [text.lower() in commands[key].split(",") for key in pickaxe_keys] or text.lower().startswith("pickaxe_id"):
return "pickaxe"
elif True in [text.lower() in commands[key].split(",") for key in emote_keys] or text.lower().startswith("eid_"):
return "emote"
elif True in [text.lower() in commands[key].split(",") for key in emoji_keys] or text.lower().startswith("emoji_"):
return "emote"
elif True in [text.lower() in commands[key].split(",") for key in toy_keys] or text.lower().startswith("toy_"):
return "emote"
def convert_to_id(text: str) -> Optional[str]:
if True in [text.lower() in commands[key].split(",") for key in outfit_keys] or text.lower().startswith("cid_"):
return "cid"
elif True in [text.lower() in commands[key].split(",") for key in backpack_keys] or text.lower().startswith("bid_"):
return "bid"
elif True in [text.lower() in commands[key].split(",") for key in pet_keys] or text.lower().startswith("petcarrier_"):
return "petcarrier"
elif True in [text.lower() in commands[key].split(",") for key in pickaxe_keys] or text.lower().startswith("pickaxe_id"):
return "pickaxe_id"
elif True in [text.lower() in commands[key].split(",") for key in emote_keys] or text.lower().startswith("eid_"):
return "eid"
elif True in [text.lower() in commands[key].split(",") for key in emoji_keys] or text.lower().startswith("emoji_"):
return "emoji_id"
elif True in [text.lower() in commands[key].split(",") for key in toy_keys] or text.lower().startswith("toy_"):
return "toy_id"
elif True in [text.lower() in commands[key].split(",") for key in item_keys]:
return "id"
def convert_to_old_type(text: str) -> Optional[str]:
converter = {
"outfit": "outfit",
"back bling": "backpack",
"pet": "pet",
"harvesting tool": "pickaxe",
"emote": "emote",
"emoticon":" emoji",
"toy": "toy",
"item": "item"
}
return converter.get(text.lower())
def convert_to_new_type(text: str) -> Optional[str]:
converter = {
"outfit": "Outfit",
"backpack": "Back Bling",
"pet": "pet",
"pickaxe": "Harvesting Tool",
"emote": "Emote",
"emoji": "Emoticon",
"toy": "Toy",
"item": "Item"
}
return converter.get(text.lower())
    def convert_backend_type(backendType: str) -> Optional[str]:
        """Map a BenBot backend type name to a display type; None for unknown types."""
        converter = {
            "AthenaBackpack": "Back Bling",
            "AthenaPickaxe": "Harvesting Tool",
            "AthenaItemWrap": "Wrap",
            "AthenaGlider": "Glider",
            "AthenaCharacter": "Outfit",
            "AthenaPet": "Pet",
            "AthenaMusicPack": "Music",
            "AthenaLoadingScreen": "Loading Screen",
            "AthenaDance": "Emote",
            "AthenaSpray": "Spray",
            "AthenaEmoji": "Emoticon",
            "AthenaSkyDiveContrail": "Contrail",
            "AthenaPetCarrier": "Pet",
            "AthenaToy": "Toy",
            "AthenaConsumableEmote": "Emote",
            "AthenaBattleBus": "Battle Bus",
            "AthenaRewardEventGraphCosmetic": "Outfit",
            "AthenaVictoryPose": "Emote"
        }
        return converter.get(backendType)
def convert_variant(type_: str, variants: dict) -> List[dict]:
result = []
for variant in variants:
for option in variant['options']:
result.append({"name": option['name'], 'variants': [{'item': type_, 'channel': variant['channel'], 'variant': option['tag']}]})
return result
    def get_device_auth_details() -> dict:
        """Read stored device-auth details from the auth file; empty dict when the file is missing."""
        if os.path.isfile(filename):
            with open(filename, 'r') as f:
                return json.load(f)
        return {}
def store_device_auth_details(email: str, details: dict) -> None:
existing = get_device_auth_details()
existing[email.lower()] = details
with open(filename, 'w') as f:
json.dump(existing, f)
def load_json(filename: str) -> Union[list,dict]:
try:
with open(filename,encoding='utf-8') as f:
data = json.load(f)
except json.decoder.JSONDecodeError:
with open(filename,encoding='utf-8-sig') as f:
data = json.load(f)
return data
    def load_config(client: Optional[fortnitepy.Client] = None) -> bool:
        """Load config.json, commands.json and replies.json into the module globals.

        Applies defaults, validates keys against config_tags/commands_tags,
        optionally copies settings onto *client*, and starts the item-data
        download in a background thread. Returns True on success, False on
        any load/validation failure.
        """
        global data
        global commands
        global replies
        try:
            data = load_json("config.json")
        except json.decoder.JSONDecodeError as e:
            send('ボット',f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
            send('ボット','config.json ファイルの読み込みに失敗しました。正しく書き込めているか確認してください',red,add_d=lambda x:f'>>> {x}')
            send('Bot','Failed to load config.json file. Make sure you wrote correctly',red,add_d=lambda x:f'>>> {x}')
            return False
        except FileNotFoundError:
            send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            send('ボット','config.json ファイルが存在しません',red,add_d=lambda x:f'>>> {x}')
            send('Bot','config.json file does not exist',red,add_d=lambda x:f'>>> {x}')
            return False
        if data.get('loglevel','normal') == 'debug':
            send('ボット',f'\n{json.dumps(data,ensure_ascii=False,indent=4)}\n',yellow,add_d=lambda x:f'\n```{x}```\n')
        # config_tags keys are subscript paths like "['fortnite']['email']";
        # eval() probes whether each path exists in the loaded config.
        for key in config_tags.keys():
            try:
                eval(f"data{key}")
            except KeyError:
                error_config.append(key)
        checks = [
            ['fortnite','blacklist'],
            ['fortnite','whitelist'],
            ['fortnite','invitelist'],
            ['fortnite','otherbotlist'],
            ['discord','blacklist'],
            ['discord','whitelist']
        ]
        # Strip empty entries from the user/bot lists, then write the cleaned
        # config back to disk.
        for check in checks:
            k,k2 = check
            for value in data.get(k,{}).get(k2,[]).copy():
                if len(str(value)) == 0:
                    data.get(k,{}).get(k2,[]).remove(value)
        with open("config.json", 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        def set_default(keys: list, default: Any, func: Optional[Callable] = None) -> None:
            # Builds "data['a']['b'] = ..." assignments via exec so nested
            # defaults can be applied from a key path.
            text = ""
            text2 = ""
            for nest,key in enumerate(keys):
                text += f"['{key}']"
                # NOTE(review): nest runs 0..len(keys)-1 and never equals
                # len(keys), so the .get(default) branch below is unreachable
                # — looks like an off-by-one; confirm intended behavior.
                if nest == len(keys):
                    text2 += f".get('{key}','{default}')"
                else:
                    text2 += f"['{key}']"
            if func:
                var = func(eval(f"data{text2}"))
                exec(f"data{text} = var")
            else:
                exec(f"data{text} = data{text2}")
        set_default(['fortnite'],{})
        set_default(['fortnite','privacy'],'public',lambda x: eval(f"fortnitepy.PartyPrivacy.{x.upper()}"))
        set_default(['discord','channels'],['{name}-command-channel'],lambda x: [i.replace(" ","-").replace(".","-").replace(",","-").replace("--","-").lower() for i in x])
        set_default(['web'],{})
        set_default(['web','ip'],'{ip}')
        set_default(['web','port'],8080)
        set_default(['web','login_required'],False)
        set_default(['lang'],'en')
        set_default(['caseinsensitive'],False)
        set_default(['no-logs'],False)
        set_default(['discord-log'],False)
        set_default(['search_max'],60)
        set_default(['omit-over2000'],False)
        set_default(['skip-if-overflow'],False)
        set_default(['hide-email'],False)
        set_default(['hide-token'],False)
        set_default(['hide-webhook'],False)
        set_default(['loglevel'],'normal')
        if data.get("status",1) == 0:
            config_tags["['fortnite']['email']"].append("red")
            config_tags["['lang']"].append("red")
        # Hosted environments (Heroku-style /app, repl.it runners) must bind
        # to all interfaces; otherwise resolve the local machine's address.
        if os.getcwd().startswith('/app') or os.getcwd().startswith('/home/runner'):
            data['web']['ip']="0.0.0.0"
        else:
            data['web']['ip'] = data['web']['ip'].format(ip=socket.gethostbyname(socket.gethostname()))
        if client:
            # Mirror config values onto the running client instance.
            client.eid=data['fortnite']['eid']
            client.whisper=data['fortnite']['whisper']
            client.partychat=data['fortnite']['partychat']
            client.discord=data['discord']['discord']
            client.web=data['web']['web']
            client.whisperperfect=data['fortnite']['disablewhisperperfectly']
            client.partychatperfect=data['fortnite']['disablepartychatperfectly']
            client.discordperfect=data['discord']['disablediscordperfectly']
            client.joinmessageenable=data['fortnite']['joinmessageenable']
            client.randommessageenable=data['fortnite']['randommessageenable']
            client.outfitmimic=data['fortnite']['outfitmimic']
            client.backpackmimic=data['fortnite']['backpackmimic']
            client.pickaxemimic=data['fortnite']['pickaxemimic']
            client.emotemimic=data['fortnite']['emotemimic']
            client.acceptinvite=data['fortnite']['acceptinvite']
            client.acceptfriend=data['fortnite']['acceptfriend']
        if error_config:
            send('ボット',f'config.json ファイルの読み込みに失敗しました。キーの名前が間違っていないか確認してください。アップデート後の場合は、最新のconfig.jsonファイルを確認してください\n{", ".join(error_config)} がありません',red,add_d=lambda x:f'>>> {x}')
            send('Bot',f'Failed to load config.json file. Make sure key name is correct. If this after update, plase check latest config.json file\n{", ".join(error_config)} is missing',red,add_d=lambda x:f'>>> {x}')
        else:
            # Probe the BenBot API before attempting the item-data download.
            flag = False
            try:
                res = requests.get('https://benbotfn.tk/api/v1/cosmetics/br/DOWN_CHECK')
            except Exception:
                flag = True
                if data['loglevel'] == 'debug':
                    send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            else:
                if res.status_code == 503:
                    flag = True
            if flag:
                send('ボット','APIがダウンしているため、アイテムデータをダウンロードできませんでした。しばらく待ってからもう一度起動してみてください',red,add_d=lambda x:f'>>> {x}')
                send('Bot','Failed to download item data because API is dawning. Please try again later',red,add_d=lambda x:f'>>> {x}')
            else:
                os.makedirs("items/", exist_ok=True)
                langs = [data['search-lang'],"en"] if data['search-lang'] != "en" else [data['search-lang']]
                Thread(target=store_item_data,args=(langs,)).start()
        def load_lang(lang: str) -> bool:
            # Loads lang/<lang>.json into the global localize table and
            # validates the required keys.
            global localize
            try:
                localize = load_json(f"lang/{lang}.json")
            except json.decoder.JSONDecodeError as e:
                send('ボット',f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
                send('ボット',f'{data["lang"]}.json ファイルの読み込みに失敗しました。正しく書き込めているか確認してください\n',red,add_d=lambda x:f'>>> {x}')
                send('Bot',f'Failed to load {data["lang"]}.json file. Make sure you wrote correctly',red,add_d=lambda x:f'>>> {x}')
                return False
            except FileNotFoundError:
                # NOTE(review): 'e' is not bound in this handler — this line
                # would raise NameError if ever reached; confirm and fix.
                send('ボット',f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
                send('ボット',f'{data["lang"]}.json ファイルが存在しません',red,add_d=lambda x:f'>>> {x}')
                send('Bot',f'{data["lang"]}.json file does not exist',red,add_d=lambda x:f'>>> {x}')
                return False
            for key in localize_keys:
                try:
                    eval(f"localize['{key}']")
                except KeyError:
                    send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    # NOTE(review): 'e' is also unbound here (except has no
                    # 'as e'); these lines would raise NameError if reached.
                    send('ボット',f'{lang}.json ファイルの読み込みに失敗しました。キーの名前が間違っていないか確認してください\n{e} がありません',red,add_d=lambda x:f'>>> {x}')
                    send('Bot',f'Failed to load {lang}.json file. Make sure key name is correct\n{e} is missing',add_d=lambda x:f'>>> {x}')
                    return False
            return True
        # Fall back to English when the configured language file is missing.
        if os.path.isfile(f"lang/{data['lang']}.json"):
            if not load_lang(data['lang']):
                return False
        else:
            if not load_lang("en"):
                return False
        # avatar_color accepts either "r,g,b" or a KairosBackgroundColorPreset name.
        color = data['fortnite']['avatar_color'].split(',') if data['fortnite']['avatar_color'] is not None else ""
        if len(color) > 2:
            background_colors = [color[0], color[1], color[2]]
        elif len(color) == 1:
            try:
                background_colors = eval(f"fortnitepy.KairosBackgroundColorPreset.{color[0]}")
            except AttributeError:
                send(l('bot'),l('color_must_be'))
                return False
        else:
            background_colors = None
        data['fortnite']['avatar_color'] = background_colors
        try:
            commands = load_json("commands.json")
        except json.decoder.JSONDecodeError as e:
            send(l('bot'),f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
            send(l('bot'),l("load_failed_json", "commands.json"),red,add_d=lambda x:f'>>> {x}')
            return False
        except FileNotFoundError:
            send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            send(l('bot'),l("load_failed_notfound", "commands.json"),red,add_d=lambda x:f'>>> {x}')
            return False
        if data['loglevel'] == 'debug':
            send(l('bot'),f'\n{json.dumps(commands,ensure_ascii=False,indent=4)}\n',yellow,add_d=lambda x:f'\n```{x}```\n')
        for key in commands_tags.keys():
            try:
                eval(f"commands{key}")
            except KeyError:
                error_commands.append(key)
        def set_default_(key: str, default: Any, func: Optional[Callable] = None) -> None:
            # Single-level default setter for the commands table.
            text = f"['{key}']"
            text2 = f".get('{key}','{default}')"
            if func:
                exec(f"commands{text} = {func}(commands{text2})")
            else:
                exec(f"commands{text} = commands{text2}")
        set_default_("ownercommands","")
        if error_commands:
            send(l('bot'),f'{l("load_failed_keyerror", "commands.json")}\n{l("is_missing", ", ".join(error_commands))}',red,add_d=lambda x:f'>>> {x}')
        if data['caseinsensitive']:
            # Normalize aliases: lowercase and fold katakana to hiragana.
            commands = {k.lower(): jaconv.kata2hira(v.lower()) for k,v in commands.items()}
        # Validate that every owner-only command actually exists.
        flag = True
        for command in commands['ownercommands'].split(','):
            if not commands.get(command):
                if flag:
                    error_commands.append("['ownercommands']")
                    flag = False
                    send(l('bot'),l('failed_ownercommand'),red,add_d=lambda x:f'>>> {x}')
                send(l('bot'),l('is_missing',command),red,add_d=lambda x:f'>>> {x}')
        if not flag:
            send(l('bot'),l('failed_ownercommand'),red,add_d=lambda x:f'>>> {x}')
        try:
            replies = load_json("replies.json")
        except json.decoder.JSONDecodeError as e:
            send(l('bot'),f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
            send(l('bot'),l("load_failed_json", "replies.json"),red,add_d=lambda x:f'>>> {x}')
            return False
        except FileNotFoundError:
            send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            send(l('bot'),l("load_failed_notfound", "replies.json"),red,add_d=lambda x:f'>>> {x}')
            return False
        return True
def get_item_data(lang: str) -> dict:
res = requests.get("https://benbotfn.tk/api/v1/cosmetics/br", params={"lang": lang})
if res.status_code == 200:
return res.json()
return None
    def store_item_data(langs: list) -> None:
        """Download BenBot cosmetics for each language and cache them per type under items/."""
        # Cosmetic types the bot never equips; skip storing them.
        ignoretype = [
            "Contrail",
            "Glider",
            "Wrap",
            "Loading Screen",
            "Music",
            "Spray",
            "Battle Bus"
        ]
        with ThreadPoolExecutor() as executor:
            futures = {executor.submit(get_item_data,lang): lang for lang in langs}
            for future in as_completed(futures):
                lang = futures[future]
                result = future.result()
                data_ = {}
                if data["loglevel"] == "debug":
                    send(l("bot"),f"Saving {lang} items",yellow)
                # Group items by display type.
                for item in result:
                    type_ = convert_backend_type(item["backendType"])
                    if type_ in ignoretype:
                        continue
                    if not data_.get(type_):
                        data_[type_] = []
                    data_[type_].append(item)
                # One JSON file per type and language, e.g. items/Outfit_en.json.
                for type_,items in data_.items():
                    with open(f"items/{type_}_{lang}.json","w",encoding="utf-8") as f:
                        json.dump(items,f,ensure_ascii=False,indent=4)
                if data["loglevel"] == "debug":
                    send(l("bot"),f"Saved {lang} items",yellow)
def get_banner_data() -> dict:
res = requests.get("https://benbotfn.tk/api/v1/exportAsset?path=FortniteGame/Content/Banners/BannerIcons")
if res.status_code == 200:
return res.json()
return None
def store_banner_data() -> None:
data = get_banner_data()
with open("items/banners.json","w",encoding="utf-8") as f:
json.dump(data,f,indent=4,ensure_ascii=False)
    def partymember_backpack(member: fortnitepy.party.PartyMemberBase) -> Optional[str]:
        """Extract the member's back-bling asset ID from its meta, or None when unset.

        The meta value is a quoted asset path; the trailing segment after the
        last '.' is the ID, and the literal string 'None' means no backpack.
        """
        asset = member.meta.backpack
        result = re.search(r".*\.([^\'\"]*)", asset.strip("'"))
        if result and result.group(1) != 'None':
            return result.group(1)
    def partymember_emote(member: fortnitepy.party.PartyMemberBase) -> Optional[str]:
        """Extract the member's current emote asset ID from its meta, or None when unset.

        Mirrors partymember_backpack: last path segment after '.', with the
        literal 'None' meaning no emote playing.
        """
        asset = member.meta.emote
        result = re.search(r".*\.([^\'\"]*)", asset.strip("'"))
        if result and result.group(1) != 'None':
            return result.group(1)
def member_asset(member: fortnitepy.party.PartyMemberBase, asset: str) -> str:
if asset in ("backpack", "pet"):
return partymember_backpack(member)
elif asset in ("emote", "emoji", "toy"):
return partymember_emote(member)
else:
return getattr(member, asset, None)
    def search_item(lang: str, mode: str, text: str, type_: Optional[str] = None, cache: Optional[bool] = True) -> Optional[List[dict]]:
        """Search cosmetics by name, ID or set.

        *mode* is 'name', 'id' or 'set'; *type_* is a comma-separated list of
        display types ('Item'/None searches every stored type). The in-memory
        cache is tried first; on a miss the function recurses with
        cache=False to read the JSON files on disk and feeds new hits back
        into the cache. Returns None when nothing matches.
        """
        itemlist = []
        if not cache_items.get(lang):
            cache_items[lang] = []
        if cache:
            if mode == 'set':
                data_ = cache_items[lang]
            else:
                data_ = [i for i in cache_items[lang] if convert_backend_type(i["backendType"]) in type_.split(',')]
        else:
            data_ = []
            if type_ not in ["Item", None]:
                with ThreadPoolExecutor() as executor:
                    def _open_file(filename: str) -> Union[list, dict]:
                        with open(filename, 'r', encoding='utf-8') as f:
                            d = json.load(f)
                        return d
                    futures = [executor.submit(_open_file,f'items/{i}_{lang}.json') for i in type_.split(',')]
                    for future in futures:
                        data_.extend(future.result())
            else:
                with ThreadPoolExecutor() as executor:
                    def _open_file(filename: str) -> Union[list, dict]:
                        with open(filename, 'r', encoding='utf-8') as f:
                            d = json.load(f)
                        return d
                    def _(text: str) -> str:
                        # Strip the directory prefix and _<lang>.json suffix.
                        return re.sub(r"items(\\|/)","",text).replace(f"_{lang}.json","")
                    # Capitalized filenames are item-type caches (banners.json etc. are not).
                    futures = [executor.submit(_open_file,f'items/{_(i)}_{lang}.json') for i in glob(f"items/*_{lang}.json") if _(i)[0].isupper()]
                    for future in futures:
                        data_.extend(future.result())
        for item in data_:
            try:
                # NOTE(review): `ignoretype` must exist at module level for
                # this lookup; the list inside store_item_data is local to it.
                if convert_backend_type(item["backendType"]) in ignoretype or item in itemlist or item.get("name") is None:
                    continue
                if mode == "name":
                    if data['caseinsensitive']:
                        # Fold case and hiragana→katakana so Japanese names match loosely.
                        text_ = jaconv.hira2kata(text.lower())
                        name = jaconv.hira2kata(item['name'].lower())
                    else:
                        text_ = text
                        name = item['name']
                    if text_ in name:
                        itemlist.append(item)
                elif mode == "id":
                    text_ = text
                    if text_.lower() in item['id'].lower():
                        itemlist.append(item)
                elif mode == "set":
                    if not item.get('set'):
                        continue
                    if data['caseinsensitive']:
                        text_ = jaconv.hira2kata(text.lower())
                        name = jaconv.hira2kata(item['set'].lower())
                    else:
                        text_ = text
                        name = item['set']
                    if text_ in name:
                        itemlist.append(item)
            except Exception:
                send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                send(l("bot"),item,red,add_d=lambda x:f'>>> {x}')
        if len(itemlist) == 0:
            if cache:
                # Cache miss: retry against the files on disk.
                return search_item(lang=lang, mode=mode, text=text, type_=type_, cache=False)
            else:
                return None
        else:
            if not cache:
                # Feed disk hits back into the in-memory cache.
                for item in itemlist:
                    if item not in cache_items[lang]:
                        cache_items[lang].append(item)
            return itemlist
    def search_style(lang: str, id_: str, type_: str, cache: Optional[bool] = True) -> Optional[List[dict]]:
        """Return flattened style variants for the item with ID *id_*, or None.

        Looks in the in-memory cache first; on a miss recurses with
        cache=False to read the per-type JSON files from disk.
        """
        if not cache_items.get(lang):
            cache_items[lang] = []
        if cache:
            data_ = cache_items[lang]
        else:
            data_ = []
            if type_ != "Item":
                with ThreadPoolExecutor() as executor:
                    futures = [executor.submit(load_json,f'items/{i}_{lang}.json') for i in type_.split(',')]
                    for future in futures:
                        data_.extend(future.result())
            else:
                with ThreadPoolExecutor() as executor:
                    def _(text: str) -> str:
                        # Strip the directory prefix and _<lang>.json suffix.
                        return re.sub(r"items(\\|/)","",text).replace(f"_{lang}.json","")
                    futures = [executor.submit(load_json,f'items/{_(i)}_{lang}.json') for i in glob(f"items/*_{lang}.json") if _(i)[0].isupper()]
                    for future in futures:
                        data_.extend(future.result())
        variants = None
        for item in data_:
            if item['id'].lower() == id_.lower():
                if item['variants']:
                    variants = convert_variant(item['backendType'], item['variants'])
                break
        if not variants:
            if cache:
                return search_style(lang=lang, id_=id_, type_=type_, cache=False)
            else:
                return None
        else:
            if not cache:
                # NOTE(review): relies on `item` still being bound from the
                # loop above; with an empty data_ this would raise NameError.
                if item not in cache_items[lang]:
                    cache_items[lang].append(item)
            return variants
def search_banner(id_: str) -> Optional[dict]:
data_ = load_json("items/banners.json")
data_ = {k.lower():v for k,v in data_.items()}
return data_.get(id_.lower())
def restart(sleep_time: Optional[Union[int,float]] = 0) -> None:
if sleep_time > 0:
time.sleep(sleep_time)
os.chdir(os.getcwd())
os.execv(os.sys.executable,['python', *sys.argv])
if True: #Asynchronous functions
async def reply(message: Union[fortnitepy.message.MessageBase, discord.Message, WebMessage], client: fortnitepy.Client, content: str) -> None:
if isinstance(message, fortnitepy.message.MessageBase):
await message.reply(content)
elif isinstance(message, discord.Message):
if len(content) > 1990:
text = discord.utils.escape_markdown(content).split("\n")
for txt in text:
if len(txt) > 1990:
text = [txt[i:i+1990] for i in range(0, len(txt), 1990)]
for t in txt:
await message.channel.send(t)
else:
await message.channel.send(content)
else:
await message.channel.send(content)
elif isinstance(message, WebMessage):
message.reply(content)
elif isinstance(message, AllMessage):
message.reply(content, client)
    async def aexec(code: str, variable: dict) -> Any:
        """Execute *code* asynchronously with *variable* as its namespace.

        Wraps the (dedented) snippet in a generated ``async def``, runs it,
        then promotes any new or changed non-internal bindings to module
        globals. SECURITY: this executes arbitrary code via exec(); never
        feed it untrusted input.
        """
        def _(text) -> str:
            # Leading indent of *text* (ASCII and ideographic spaces), as spaces.
            return re.match(r"(\u0020|\u3000)*", text).end() * u"\u0020"
        scode = code.split('\n')
        delete = len(_(scode[0]))
        # Dedent every line by the first line's indent.
        lines = [i.replace(u"\u0020", "", delete) for i in scode]
        exc = (
            f'async def __ex(var):'
            + '\n for v in var:'
            + '\n v = var[v]'
            + ''.join(f'\n {l}' for l in lines)
            + '\n for v in locals():'
            + '\n var[v] = locals()[v]'
        )
        if data['loglevel'] == 'debug':
            send(l('bot'),exc,yellow,add_d=lambda x:f'```\n{x}\n```')
        exec(exc)
        variable_before = variable.copy()
        result = await locals()['__ex'](variable)
        variable_after = variable.copy()
        # New/changed bindings, skipping underscored and loop-helper names.
        newvar = {k: v for k,v in variable_after.items() if (k not in variable_before.keys() or v != variable_before.get(k)) and "_" not in k and k not in ("k", "v") and isinstance(k, str)}
        for k in newvar:
            exc = (
                f"global {k}"
                + f"\n{k} = newvar['{k}']"
            )
            exec(exc)
        return result
async def generate_device_auth_and_store(email: str) -> dict:
    """Authorize *email* via the device-code flow and persist its device auth.

    Repeatedly issues device codes and waits for the user to complete the
    verification URL; only proceeds once the authorized account's email
    matches *email*. Generates device-auth details, stores them on disk and
    returns them as a dict (annotation corrected from ``str``).
    """
    global web_text
    access_token,expires_at = await get_token()
    while True:
        flag = False  # becomes True once the right account authorized
        while True:
            # Issue a fresh device code and show the verification URL both
            # on the console and in the web UI (via the `web_text` global).
            device_auth_details = await get_device_code(access_token)
            send(l('bot'),l('get_code', email, device_auth_details['verification_uri_complete']))
            web_text = l('get_code2', email, device_auth_details['verification_uri_complete'])
            device_auth = await device_code_auth(device_auth_details["device_code"])
            if not device_auth:
                # Device code expired or was rejected; refresh the client
                # token if needed and loop to issue a new code.
                send(l('bot'),l('authorization_expired'))
                if expires_at < datetime.datetime.utcnow():
                    access_token, expires_at = await get_token()
            else:
                # Check the authorized account is the one we asked for.
                fortnite_access_token, fortnite_expires_at = await get_fortnite_token(device_auth["access_token"])
                user = await lookup_user(device_auth["in_app_id"], fortnite_access_token)
                if user["email"].lower() == email.lower():
                    flag = True
                    break
                else:
                    # Wrong account: restart the whole outer loop.
                    send(l('bot'),l('account_incorrect', email))
                    break
        if flag == True:
            break
    # Trade the device-auth access token for launcher credentials, then
    # generate permanent device-auth details and store them for this email.
    exchange_code = await exchange(device_auth["access_token"])
    launcher_access_token, client_id = await exchange_code_auth(exchange_code)
    details = await generate_device_auth(client_id, launcher_access_token)
    store_device_auth_details(email.lower(), details)
    web_text = ""  # clear the web-UI prompt
    return details
async def get_token() -> tuple:
    """Fetch a launcher client-credentials token.

    Returns:
        ``(access_token, expires_at)`` where ``expires_at`` is a naive
        UTC datetime parsed from the API's ISO timestamp.
    """
    async with aiohttp.ClientSession() as session:
        response = await session.post(
            oauth_url,
            headers={"Authorization": f"basic {launcher_token}"},
            data={
                "grant_type": "client_credentials",
                "token_type": "eg1"
            },
        )
        payload = await response.json()
    expires = datetime.datetime.fromisoformat(payload["expires_at"].replace("Z",""))
    return payload["access_token"], expires
async def get_fortnite_token(access_token: str) -> tuple:
    """Exchange a launcher token for a Fortnite access token.

    Returns:
        ``(access_token, expires_at)`` where ``expires_at`` is a naive
        UTC datetime parsed from the API's ISO timestamp.
    """
    code = await exchange(access_token)
    async with aiohttp.ClientSession() as session:
        response = await session.post(
            fortnite_token_url,
            headers={"Authorization": f"basic {fortnite_token}"},
            data={
                "grant_type": "exchange_code",
                "token_type": "eg1",
                "exchange_code": code
            },
        )
        payload = await response.json()
    expires = datetime.datetime.fromisoformat(payload["expires_at"].replace("Z",""))
    return payload["access_token"], expires
async def get_device_code(access_token: str) -> dict:
    """Request a new device-code authorization payload from the API."""
    async with aiohttp.ClientSession() as session:
        response = await session.post(
            device_auth_url,
            headers={
                "Authorization": f"bearer {access_token}",
                "Content-Type": "application/x-www-form-urlencoded"
            },
        )
        return await response.json()
async def device_code_auth(device_code: str) -> Optional[dict]:
    """Poll the OAuth endpoint until *device_code* is approved.

    Polls every 5 seconds. Returns the token payload on success, or
    ``None`` when the API reports any error other than "authorization
    pending" (e.g. the code expired).
    """
    notified = False  # log the "waiting" message only once
    async with aiohttp.ClientSession() as session:
        while True:
            await asyncio.sleep(5)
            response = await session.post(
                oauth_url,
                headers={"Authorization": f"basic {launcher_token}"},
                data={
                    "grant_type": "device_code",
                    "device_code": device_code
                },
            )
            payload = await response.json()
            error_code = payload.get("errorCode")
            if error_code == "errors.com.epicgames.account.oauth.authorization_pending":
                if not notified:
                    send(l('bot'),l('waiting_for_authorization'))
                    notified = True
                continue
            if error_code:
                return None
            return payload
async def exchange_code_auth(exchange_code: str) -> tuple:
    """Authenticate with an exchange code.

    Returns:
        ``(access_token, account_id)`` from the OAuth response.
    """
    async with aiohttp.ClientSession() as session:
        response = await session.post(
            exchange_auth_url,
            headers={"Authorization": f"basic {launcher_token}"},
            data={
                "grant_type": "exchange_code",
                "exchange_code": exchange_code,
                "token_type": "eg1"
            },
        )
        payload = await response.json()
    return payload["access_token"], payload["account_id"]
async def exchange(access_token: str) -> str:
    """Obtain a one-time exchange code for the given access token."""
    async with aiohttp.ClientSession() as session:
        response = await session.get(
            exchange_url,
            headers={"Authorization": f"bearer {access_token}"},
        )
        payload = await response.json()
    return payload["code"]
async def lookup_user(user_id: str, fortnite_access_token: str) -> dict:
    """Fetch account info for *user_id* using a Fortnite access token."""
    async with aiohttp.ClientSession() as session:
        response = await session.get(
            user_lookup_url.format(user_id=user_id),
            headers={"Authorization": f"bearer {fortnite_access_token}"},
        )
        return await response.json()
async def generate_device_auth(client_id: str, access_token: str) -> dict:
    """Create device-auth credentials for the account *client_id*.

    Returns:
        Dict with ``device_id``, ``account_id`` and ``secret`` extracted
        from the API response.
    """
    url = f"https://account-public-service-prod.ol.epicgames.com/account/api/public/account/{client_id}/deviceAuth"
    async with aiohttp.ClientSession() as session:
        response = await session.post(
            url,
            headers={
                "Authorization": f"Bearer {access_token}",
                "Content-Type": "application/json"
            },
        )
        payload = await response.json()
    return {
        "device_id": payload["deviceId"],
        "account_id": payload["accountId"],
        "secret": payload["secret"],
    }
async def run_bot() -> None:
    """Start every fortnitepy client and translate auth failures into
    log messages, exiting the process on fatal ones."""
    for client in clients:
        client.booting = True
    # Optional scheduled self-restart, configured in seconds via `restart_in`.
    if data.get('restart_in') not in [None, 0]:
        Timer(data.get('restart_in'), restart).start()
    try:
        await fortnitepy.start_multiple(
            clients,
            all_ready_callback=lambda: send(l("bot"),l("all_login"),green,add_p=lambda x:f'[{now()}] {x}') if len(clients) > 1 else print('')
        )
    except fortnitepy.AuthException as e:
        if data["loglevel"] == "debug":
            send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        if "errors.com.epicgames.account.oauth.exchange_code_not_found" in e.args[0]:
            send(l("bot"),l("exchange_code_error"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
        elif "Invalid device auth details passed." in e.args[0]:
            # Stored device auth is stale: drop this account's entry from
            # the device-auth file and restart so it can be regenerated.
            email = e.args[0].split("-")[0].strip()
            details = get_device_auth_details()
            details.pop(email.lower())
            with open(filename, 'w') as f:
                json.dump(details, f)
            restart()
        else:
            send(l("bot"),l("login_failed"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
            sys.exit(1)
    except fortnitepy.HTTPException as e:
        if data["loglevel"] == "debug":
            send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        # A "reset" marker in the message indicates Epic forced a
        # password reset on the account.
        if "reset" in e.args[0]:
            send(l("bot"),l("password_reset_error"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
        else:
            send(l("bot"),l("login_failed"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)
    except Exception:
        # Unknown failure: log the traceback and bail out.
        send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(l("bot"),l("failed_to_load_account"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
        sys.exit(1)
async def run_app() -> None:
    """Start the web UI server; on success optionally open it in a browser."""
    try:
        await app.create_server(host=data['web']['ip'], port=data['web']['port'], return_asyncio_server=True, access_log=data['web']['log'])
    except OSError:
        # Port already bound — most likely another instance is running.
        if data["loglevel"] == "debug":
            send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        send(l("bot"),l("web_already_running"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
    except Exception:
        send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    else:
        # Auto-open the web UI when the bot is unconfigured (status 0) or
        # not yet ready, so the user can finish setup in the browser.
        if data["status"] == 0 or bot_ready is False:
            webbrowser.open(f"http://{data['web']['ip']}:{data['web']['port']}")
        send(l("bot"),l("web_running",f"http://{data['web']['ip']}:{data['web']['port']}"),add_p=lambda x:f'[{now()}] {x}')
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
async def process_command(message: Union[fortnitepy.FriendMessage, fortnitepy.PartyMessage, discord.Message, WebMessage, AllMessage]):
global blacklist
global whitelist
global blacklist_
global whitelist_
global otherbotlist
if not message or not message.content:
return
loop = asyncio.get_event_loop()
content = message.content
con = content.split("\n")
if data['caseinsensitive']:
args = jaconv.kata2hira(content.lower()).split()
else:
args = content.split()
content_ = ' '.join(args[1:])
content2_ = ' '.join(args[2:])
rawargs = content.split()
rawcontent = ' '.join(rawargs[1:])
rawcontent2 = ' '.join(rawargs[2:])
check_ownercommand = True
check_ng = True
if len(args) < 1:
return
if isinstance(message, fortnitepy.message.MessageBase):
client = message.client
client.add_cache(message.author)
if ((data['discord']['enabled'] and not dclient.isready)
or (message.author.id in blacklist and data['fortnite']['blacklist-ignorecommand'])
or (message.author.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['ignorebot'])):
return
if ((len(con) > 1)
and not (args[0] in commands['eval'].split(','))
and not (args[0] in commands['exec'].split(','))):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
if isinstance(message, fortnitepy.FriendMessage):
if not client.whisper:
if client.whisperperfect:
return
elif message.author.id != getattr(client.owner,"id",None) and message.author.id not in whitelist:
return
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}]',content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} [{platform_to_str(message.author.platform)}] | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(message, fortnitepy.PartyMessage):
if not client.partychat:
if client.partychatperfect:
return
elif message.author.id != getattr(client.owner,"id",None) and message.author.id not in whitelist:
return
display_name_ = client.is_most()
if display_name_:
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {name(message.author)} | {x}',add_d=lambda x:f'[{l("party")}] [{display_name_}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}]',content,add_p=lambda x:f'[{now()}] [{l("party")}/{client.party.id}] [{display_name_}] {name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}] | {x}',add_d=lambda x:f'[{l("party")}/{client.party.id}] [{display_name_}] {x}')
if rawcontent in commands['me'].split(','):
rawcontent = message.author.id
content_ = message.author.id
if ((getattr(client.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])):
check_ownercommand = False
if ((getattr(client.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['fortnite']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message, discord.Message):
if ((not isinstance(message.channel, discord.TextChannel))
or (message.author.id == dclient.user.id)
or (message.author.id in blacklist_ and data['discord']['blacklist-ignorecommand'])
or (message.author.bot and data['discord']['ignorebot'])):
return
if True in [True for i in data['discord']['channels'] if "{name}" not in i and "{id}" not in i and message.channel.name == i]:
tasks = {}
for client_ in loadedclients:
mes = AllMessage(content, message.author, client_, message)
task = loop.create_task(process_command(mes))
tasks[client_] = [task, mes]
await asyncio.gather(*[i[0] for i in tasks.values()])
for client_,list_ in tasks.items():
result = list_[1].result[client_.user.id]
if result:
results = '\n'.join(result)
await reply(message, client_, f"[{name(client_.user)}] {results}")
return
else:
for clientname, client in client_name.items():
if not client.isready:
continue
if message.channel.name in [i.format(name=clientname, id=client.user.id).replace(" ","-").replace(".","-").replace(",","-").replace("--","-").lower() for i in data["discord"]["channels"]]:
break
else:
return
if not client.discord:
if client.discordperfect:
return
elif message.author.id != getattr(dclient.owner,"id",None) and message.author.id not in whitelist_:
return
if (len(con) > 1
and not (args[0] in commands['eval'].split(','))
and not (args[0] in commands['exec'].split(','))):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}({dclient.user})] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}({dclient.user})] {x}')
if ((getattr(dclient.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['discord']['whitelist-ownercommand'])):
check_ownercommand = False
if ((getattr(dclient.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['discord']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message, WebMessage):
client = message.client
if ((data['discord']['enabled'] and not dclient.isready)
or (not client.web)):
return
if (len(con) > 1
and not (args[0] in commands['eval'].split(','))
and not (args[0] in commands['exec'].split(','))):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(message, AllMessage):
client = message.client
if data['discord']['enabled'] and not dclient.isready:
return
if (len(con) > 1
and not (args[0] in commands['eval'].split(','))
and not (args[0] in commands['exec'].split(','))):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
if isinstance(message.base, fortnitepy.message.MessageBase):
client.add_cache(message.author)
if ((message.author.id in blacklist and data['fortnite']['blacklist-ignorecommand'])
or (message.author.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['ignorebot'])):
return
if isinstance(message.base, fortnitepy.FriendMessage):
if not client.whisper:
if client.whisperperfect:
return
elif message.author.id != getattr(client.owner,"id",None) and message.author.id not in whitelist:
return
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}]',content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(message.base, fortnitepy.PartyMessage):
if not client.partychat:
if client.partychatperfect:
return
elif message.author.id != getattr(client.owner,"id",None) and message.author.id not in whitelist:
return
display_name = client.is_most()
if display_name:
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{l("party")}] [{display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}]',content,add_p=lambda x:f'[{now()}] [{l("party")}/{client.party.id}] [{display_name}] {name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}] | {x}',add_d=lambda x:f'[{l("party")}/{client.party.id}] [{display_name}] {x}')
if rawcontent in commands['me'].split(','):
rawcontent = message.author.id
content_ = message.author.id
if ((getattr(client.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])):
check_ownercommand = False
if ((getattr(client.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['fortnite']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message.base, discord.message.Message):
if ((message.author.id == dclient.user.id)
or (message.author.id in blacklist_ and data['discord']['blacklist-ignorecommand'])
or (message.author.bot and data['discord']['ignorebot'])):
return
if not client.discord:
if client.discordperfect:
return
elif message.author.id != getattr(dclient.owner,"id",None) and message.author.id not in whitelist_:
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}({dclient.user})] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}({dclient.user})] {x}')
if ((getattr(dclient.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['discord']['whitelist-ownercommand'])):
check_ownercommand = False
if ((getattr(dclient.owner,"id",None) == message.author.id)
or (message.author.id in whitelist and data['discord']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message.base, WebMessage):
if ((data['discord']['enabled'] and not dclient.isready)
or (not client.web)):
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
if not client.isready:
return
display_name = name(client.user)
if check_ownercommand:
for command in commands['ownercommands'].split(','):
if args[0] in commands[command].split(","):
await reply(message, client, l("this_command_owneronly"))
return
reply_flag = False
for key,value in replies.items():
if data["replies-matchmethod"] == "contains":
if [k for k in key.split(',') if k in content]:
reply_flag = True
elif data["replies-matchmethod"] == "full":
if [k for k in key.split(',') if k == content]:
reply_flag = True
elif data["replies-matchmethod"] == "starts":
if [k for k in key.split(',') if content.startswith(k)]:
reply_flag = True
elif data["replies-matchmethod"] == "ends":
if [k for k in key.split(',') if content.endswith(k)]:
reply_flag = True
if reply_flag:
await reply(message, client, value)
if check_ng:
flag = False
if data["ng-word-matchmethod"] == "contains":
if [ng for ng in data["ng-words"] if ng in content]:
flag = True
elif data["ng-word-matchmethod"] == "full":
if [ng for ng in data["ng-words"] if ng == content]:
flag = True
elif data["ng-word-matchmethod"] == "starts":
if [ng for ng in data["ng-words"] if content.startswith(ng)]:
flag = True
elif data["ng-word-matchmethod"] == "ends":
if [ng for ng in data["ng-words"] if content.endswith(ng)]:
flag = True
if flag:
if data["ng-word-blacklist"]:
if isinstance(message, fortnitepy.message.MessageBase):
blacklist.append(message.author.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"].append(message.author.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
elif isinstance(message, discord.Message):
blacklist_.append(message.author.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].append(message.author.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
member = client.party.members.get(message.author.id)
if member and client.party.me.leader:
if data["ng-word-kick"]:
try:
await message.author.kick()
except Exception as e:
if data["loglevel"] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
elif data["ng-word-chatban"]:
try:
await message.author.chatban()
except Exception as e:
if data["loglevel"] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
return
if reply_flag:
return
if args[0] in commands['prev'].split(','):
c = client.prevmessage.get(message.author.id,"")
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
await task
result = task.result
if result:
await reply(message, client, result)
return
elif args[0] in commands['eval'].split(','):
try:
if rawcontent == "":
await reply(message, client, f"[{commands['eval']}] [{l('eval')}]")
return
variable=globals()
variable.update(locals())
if rawcontent.startswith("await "):
if data['loglevel'] == "debug":
send(display_name,f"await eval({rawcontent.replace('await ','',1)})",yellow,add_d=lambda x:f'```\n{x}\n```')
result = await eval(rawcontent.replace("await ","",1), variable)
await reply(message, client, str(result))
else:
if data['loglevel'] == "debug":
send(display_name,f"eval {rawcontent}",yellow,add_d=lambda x:f'```\n{x}\n```')
result = eval(rawcontent, variable)
await reply(message, client, str(result))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
elif args[0] in commands['exec'].split(','):
try:
if rawcontent == "":
await reply(message, client, f"[{commands['exec']}] [{l('exec')}]")
return
variable = globals()
variable.update(locals())
args_ = content.split(" ")
content_ = " ".join(args_[1:])
result = await aexec(content_, variable)
await reply(message, client, str(result))
except Exception as e:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
if data['discord']['enabled']:
if args[0] in commands['addblacklist_discord'].split(','):
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['addblacklist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id not in blacklist_:
blacklist_.append(user.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].append(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('discord_blacklist')),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('discord_blacklist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('discord_blacklist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeblacklist_discord'].split(','):
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['removeblacklist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id in blacklist_:
blacklist_.remove(user.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].remove(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', f'{name(user)}', l('discord_blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', f'{name(user)}', l('discord_blacklist')))
else:
await reply(message, client, l('not_list', f'{name(user)}', l('discord_blacklist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l('user_notfound'),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,traceback.format_exc(),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addwhitelist_discord'].split(','):
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['addwhitelist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id not in whitelist_:
whitelist_.append(user.id)
data_ = load_json("config.json")
data_["discord"]["whitelist"].append(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', f'{name(user)}', l('discord_whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_from_list', f'{name(user)}', l('discord_whitelist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('discord_whitelist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removewhitelist_discord'].split(','):
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['removewhitelist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id in whitelist_:
whitelist_.remove(user.id)
data_ = load_json("config.json")
data_["discord"]["whitelist"].remove(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', f'{name(user)}', l('discord_whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_list', f'{name(user)}', l('discord_whitelist')))
else:
await reply(message, client, l('not_list', f'{name(user)}', l('discord_whitelist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
if args[0] in commands['restart'].split(','):
try:
if not client.acceptinvite:
if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
if (not (message.author.id == getattr(client.owner,"id",None))
and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
and not (message.author.id == getattr(dclient.owner,"id",None))
and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
await reply(message, client, l('invite_is_decline'))
return
await reply(message, client, l('restarting'))
Thread(target=restart,args=(0.5,)).start()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['relogin'].split(','):
try:
if client.acceptinvite is False:
if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
if (not (message.author.id == getattr(client.owner,"id",None))
and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
and not (message.author.id == getattr(dclient.owner,"id",None))
and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
await reply(message, client, l('invite_is_decline'))
return
await reply(message, client, l('relogining'))
await client.restart()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['reload'].split(','):
success = load_config(client)
try:
if success:
await reply(message, client, l('success'))
else:
await reply(message, client, l('error'))
return
try:
if data['fortnite']['avatar_id'] == "{bot}":
client.set_avatar(fortnitepy.Avatar(asset=client.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
else:
client.set_avatar(fortnitepy.Avatar(asset=data['fortnite']['avatar_id'].format(bot=client.party.me.outfit), background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(client.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
owner = None
try:
owner = await client.fetch_profile(data['fortnite']['owner'])
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not owner:
send(display_name,l("owner_notfound"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(owner)
owner = client.get_friend(owner.id)
if not owner:
if data['fortnite']['addfriend']:
send(display_name,l("not_friend_with_owner",commands["reload"]),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
try:
await client.add_friend(owner.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
client.owner = owner
send(display_name,f'{l("owner")}: {name(client.owner)}',green,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if client.owner:
await client.owner.send(l("click_invite"))
await client.owner.send(l("click_invite"))
for blacklistuser in data['fortnite']['blacklist']:
try:
user = await client.fetch_profile(blacklistuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
client.add_cache(user)
if not user:
send(display_name,l("blacklist_user_notfound",blacklistuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
blacklist.append(user.id)
if data['fortnite']['blacklist-autoblock']:
try:
await user.block()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f'blacklist: {blacklist}',yellow,add_d=lambda x:f'```\n{x}\n```')
for whitelistuser in data['fortnite']['whitelist']:
try:
user = await client.fetch_profile(whitelistuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
client.add_cache(user)
if not user:
send(display_name,l("whitelist_user_notfound",whitelistuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
whitelist.append(user.id)
if data['loglevel'] == "debug":
send(display_name,f'whitelist {whitelist}',yellow,add_d=lambda x:f'```\n{x}\n```')
for otherbotlistuser in data['fortnite']['otherbotlist']:
try:
user = await client.fetch_profile(otherbotlistuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
client.add_cache(user)
if not user:
send(display_name,l("botlist_user_notfound",otherbotlistuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
otherbotlist.append(user.id)
if data['loglevel'] == "debug":
send(display_name,f'botlist {otherbotlist}',yellow,add_d=lambda x:f'```\n{x}\n```')
for invitelistuser in data['fortnite']['invitelist']:
try:
user = await client.fetch_profile(invitelistuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("invitelist_user_notfound",invitelistuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
friend = client.get_friend(user.id)
if not friend and user.id != client.user.id:
if data['fortnite']['addfriend']:
try:
await client.add_friend(friend.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
send(display_name,l("not_friend_with_inviteuser",invitelistuser,commands["reload"]),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(user)
client.invitelist.append(user.id)
if data['loglevel'] == "debug":
send(display_name,f'invitelist {client.invitelist}',yellow,add_d=lambda x:f'```\n{x}\n```')
if data['fortnite']['acceptfriend']:
pendings = []
for pending in client.pending_friends.copy().values():
client.add_cache(pending)
if pending.incoming:
pendings.append(pending)
for pending in pendings:
if client.acceptfriend is True:
try:
await pending.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['discord']['enabled']:
dclient_user = name(dclient.user)
activity = discord.Game(name=data['discord']['status'])
await dclient.change_presence(activity=activity)
for blacklistuser in data['discord']['blacklist']:
blacklistuser = int(blacklistuser)
user = dclient.get_user(blacklistuser)
if not user:
try:
user = await dclient.fetch_user(blacklistuser)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
user = None
if not user:
send(dclient_user,l('discord_blacklist_user_notfound', blacklistuser),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
else:
blacklist_.append(user.id)
if data['loglevel'] == "debug":
send(dclient_user,blacklist_,yellow,add_d=lambda x:f'```\n{x}\n```')
for whitelistuser in data['discord']['whitelist']:
whitelistuser = int(whitelistuser)
user = dclient.get_user(whitelistuser)
if not user:
try:
user = await dclient.fetch_user(whitelistuser)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
user = None
if not user:
send(dclient_user,l('discord_whitelist_user_notfound', whitelistuser),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
else:
whitelist_.append(user.id)
if data['loglevel'] == "debug":
send(dclient_user,whitelist_,yellow,add_d=lambda x:f'```\n{x}\n```')
dclient.owner = None
owner = dclient.get_user(int(data['discord']['owner']))
if not owner:
try:
owner = await dclient.fetch_user(int(data['discord']['owner']))
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(dclient_user,l('error_while_requesting_userinfo'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
if not owner:
send(dclient_user,l('discord_owner_notfound'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
else:
dclient.owner = owner
send(dclient_user,f"{l('owner')}: {name(dclient.owner)}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addblacklist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addblacklist']}] [{l('name_or_id')}]")
return
# Build the candidate map of cached users matching the search string.
# Bugfix: the predicate matched against the module-level helper `name`
# (a constant callable -> str(name) never varies per user) instead of the
# user's display name; every sibling handler (removeblacklist, add/remove
# whitelist, ...) filters on user.display_name.
if data["caseinsensitive"]:
    users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in blacklist}
else:
    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in blacklist}
try:
user = await client.fetch_profile(rawcontent)
if user:
if user.id not in blacklist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', len(users)))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in blacklist:
blacklist.append(user.id)
if user.display_name:
data["fortnite"]["blacklist"].append(user.display_name)
else:
data["fortnite"]["blacklist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('blacklist')))
else:
await reply(message, client, l('already_in_list', f'{name(user)}', l('blacklist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in blacklist:
blacklist.append(user.id)
if user.display_name:
data["fortnite"]["blacklist"].append(user.display_name)
else:
data["fortnite"]["blacklist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('blacklist')))
else:
await reply(message, client, l('already_in_list', f'{name(user)}', l('blacklist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('blacklist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeblacklist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removeblacklist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in blacklist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in blacklist}
try:
user = await client.fetch_profile(rawcontent)
if not user:
if user.id in blacklist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in blacklist:
blacklist.remove(user.id)
try:
data["fortnite"]["blacklist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["blacklist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', name(user), l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', name(user), l('blacklist')))
else:
await reply(message, client, l('not_list', name(user), l('blacklist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in blacklist:
blacklist.remove(user.id)
try:
data["fortnite"]["blacklist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["blacklist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', name(user), l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', name(user), l('blacklist')))
else:
await reply(message, client, l('not_list', name(user), l('blacklist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('blacklist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addwhitelist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addwhitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in whitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in whitelist}
try:
user = await client.fetch_profile(rawcontent)
if user:
if user.id not in whitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in whitelist:
whitelist.append(user.id)
if user.display_name:
data["fortnite"]["whitelist"].append(str(user.display_name))
else:
data["fortnite"]["whitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["whitelist"] = data["fortnite"]["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l('whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('whitelist')))
else:
await reply(message, client, l("already_list", name(user), l('whitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in whitelist:
whitelist.append(user.id)
if user.display_name:
data["fortnite"]["whitelist"].append(str(user.display_name))
else:
data["fortnite"]["whitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["whitelist"] = data["fortnite"]["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l('whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('whitelist')))
else:
await reply(message, client, l("already_list", name(user), l('whitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('whitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removewhitelist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removewhitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in whitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in whitelist}
try:
user = await client.fetch_profile(rawcontent)
if user:
if user.id in whitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in whitelist:
whitelist.remove(user.id)
try:
data["whitelist"].remove(str(user.display_name))
except ValueError:
data["whitelist"].remove(user.id)
data_ = load_json("config.json")
data_["whitelist"] = data["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("whitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('whitelist')))
else:
await reply(message, client, l("not_list", name(user), l('whitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in whitelist:
whitelist.remove(user.id)
try:
data["whitelist"].remove(str(user.display_name))
except ValueError:
data["whitelist"].remove(user.id)
data_ = load_json("config.json")
data_["whitelist"] = data["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("whitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('whitelist')))
else:
await reply(message, client, l("not_list", name(user), l('whitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('whitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addinvitelist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addinvitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in client.invitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in client.invitelist}
try:
user = await client.fetch_profile(rawcontent)
if user:
if user.id not in client.invitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in client.invitelist:
client.invitelist.append(user.id)
if user.display_name:
data["fortnite"]["invitelist"].append(str(user.display_name))
else:
data["fortnite"]["invitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('invitelist')))
else:
await reply(message, client, l("already_list", name(user), l('invitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in client.invitelist:
client.invitelist.append(user.id)
if user.display_name:
data["fortnite"]["invitelist"].append(str(user.display_name))
else:
data["fortnite"]["invitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('invitelist')))
else:
await reply(message, client, l("already_list", name(user), l('invitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('invitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeinvitelist'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removeinvitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in client.invitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in client.invitelist}
try:
user = await client.fetch_profile(rawcontent)
if user:
if user.id in client.invitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in client.invitelist:
client.invitelist.remove(user.id)
try:
data["fortnite"]["invitelist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["invitelist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('invitelist')))
else:
await reply(message, client, l("not_list", name(user), l('invitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in client.invitelist:
client.invitelist.remove(user.id)
try:
data["fortnite"]["invitelist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["invitelist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('invitelist')))
else:
await reply(message, client, l("not_list", name(user), l('invitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('invitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['get'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['get']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.party.members.get(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l("user_not_in_party"))
return
send(display_name,f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if data['loglevel'] == 'debug':
send(display_name,json.dumps(member.meta.schema, indent=2),yellow,add_d=lambda x:f'```\n{x}\n```',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}')
else:
client.select[message.author.id] = {
"exec": [
"""\
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l("user_not_in_party"))
return
send(display_name,f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if data['loglevel'] == 'debug':
send(display_name,json.dumps(member.meta.schema, indent=2),yellow,add_d=lambda x:f'>>> {x}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}')""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_get_userinfo')}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['friendcount'].split(','):
try:
send(display_name,f"{l('friendcount')}: {len(client.friends)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('friendcount')}: {len(client.friends)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['pendingcount'].split(','):
try:
outgoing = [i for i in client.pending_friends.values() if i.outgoing]
incoming = [i for i in client.pending_friends.values() if i.incoming]
send(display_name,f"{l('pendingcount')}: {len(client.pending_friends)}\n{l('outbound')}: {len(outgoing)}\n{l('inbound')}: {len(incoming)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('pendingcount')}: {len(client.pending_friends)}\n{l('outbound')}: {len(outgoing)}\n{l('inbound')}: {len(incoming)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['blockcount'].split(','):
try:
send(display_name,f"{l('blockcount')}: {len(client.blocked_users)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('blockcount')}: {len(client.blocked_users)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['friendlist'].split(','):
try:
text = ''
for friend in client.friends.values():
client.add_cache(friend)
text += f'\n{name(friend)}'
send(display_name,text,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{text}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['pendinglist'].split(','):
try:
outgoing = ''
incoming = ''
for pending in client.pending_friends.values():
client.add_cache(pending)
if pending.outgoing:
outgoing += f'\n{name(pending)}'
elif pending.incoming:
incoming += f'\n{name(pending)}'
send(display_name,f"{l('outbound')}: {outgoing}\n{l('inbound')}: {incoming}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('outbound')}: {outgoing}\n{l('inbound')}: {incoming}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['blocklist'].split(','):
try:
text = ''
for block in client.blocked_users.values():
client.add_cache(block)
text += f'\n{name(block)}'
send(display_name,text,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{text}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['wait'].split(','):
    # "wait" mode: decline party invites for the configured interval, then
    # re-enable acceptance via a one-shot Timer calling client.inviteaccept.
    try:
        if not client.acceptinvite:
            # Already in wait mode: only the owner(s) or (configured)
            # whitelisted users may restart the timer from a chat message.
            if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
                if (not (message.author.id == getattr(client.owner,"id",None))
                    and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
                    and not (message.author.id == getattr(dclient.owner,"id",None))
                    and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
                    await reply(message, client, l('invite_is_decline'))
                    return
        # NOTE(review): nesting reconstructed -- assumed this runs for every
        # permitted caller, not only when already waiting; confirm against the
        # original indentation.
        client.acceptinvite = False
        try:
            # Cancel a previously scheduled re-enable so the interval restarts.
            client.timer_.cancel()
        except AttributeError:
            # No timer has been created yet.
            pass
        client.timer_ = Timer(data['fortnite']['waitinterval'], client.inviteaccept)
        client.timer_.start()
        await reply(message, client, l('decline_invite_for', str(data['fortnite']['waitinterval'])))
    except Exception:
        if data['loglevel'] == 'debug':
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
        await reply(message, client, l('error'))
elif args[0] in commands['join'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['join']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
else:
await friend.join_party()
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
else:
await friend.join_party()
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already_or_offline'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_notfound'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_private'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_joining_to_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"{l('enter_to_join_party')}"
await reply(message, client, text)
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already_or_offline'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_notfound'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_private'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_joining_to_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
    elif args[0] in commands['joinid'].split(','):
        # Join a party directly by its party id (args[1]).
        try:
            await client.join_to_party(party_id=args[1])
        except fortnitepy.PartyError:
            # Party is full or the client is already a member.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('party_full_or_already'))
        except fortnitepy.NotFound:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('party_notfound'))
        except fortnitepy.Forbidden:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('party_private'))
        except IndexError:
            # No party id supplied; show usage.
            # NOTE(review): the usage text references commands['join'] rather
            # than commands['joinid'] — confirm whether that is intentional.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['join']}] [{l('party_id')}]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['leave'].split(','):
        # Leave the current party.
        try:
            await client.party.me.leave()
            # NOTE(review): client.party.id is read *after* leaving, so this
            # may report the post-leave party's id — confirm intended.
            await reply(message, client, l('party_leave', client.party.id))
        except fortnitepy.HTTPException:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error_while_leaving_party'))
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
elif args[0] in commands['invite'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['invite']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.invite()
await reply(message, client, l('user_invited', name(friend), client.party.id))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.invite()
await reply(message, client, l('user_invited', name(friend), client.party.id))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_partyinvite'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_invite_user')}"
await reply(message, client, text)
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_partyinvite'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['inviteall'].split(','):
try:
[loop.create_task(client.party.invite(inviteuser)) for inviteuser in client.invitelist]
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['message'].split(','):
try:
text = rawcontent.split(' : ')
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if text[0] in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if text[0] in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_profile(text[0])
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.send(text[1])
await reply(message, client, l('user_sent', name(friend), text[1]))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.send(text[1])
await reply(message, client, l('user_sent', name(friend), text[1]))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))""" for user in users.values()
],
"variable": [
{"user": user, "text": text} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_send')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['message']}] [{l('name_or_id')}] : [{l('content')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
    elif args[0] in commands['partymessage'].split(','):
        # Send rawcontent to the current party's chat.
        try:
            if rawcontent == '':
                # No message text supplied; show usage.
                await reply(message, client, f"[{commands['partymessage']}] [{l('content')}]")
                return
            await client.party.send(rawcontent)
            await reply(message, client, l('party_sent', client.party.id, rawcontent))
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['sendall'].split(','):
        # Broadcast the command text in rawcontent to every loaded client and
        # relay each client's collected command output back to the requester.
        try:
            if rawcontent == '':
                await reply(message, client, f"[{commands['sendall']}] [{l('content')}]")
                return
            tasks = {}
            for client_ in loadedclients:
                # AllMessage is a synthetic message that accumulates per-client
                # results in .result while process_command runs against client_.
                mes = AllMessage(rawcontent, message.author, client_, message)
                task = loop.create_task(process_command(mes))
                tasks[client_] = [task, mes]
            # Wait for every client to finish processing before reporting.
            await asyncio.gather(*[i[0] for i in tasks.values()])
            for client_,list_ in tasks.items():
                result = list_[1].result
                if result.get(client_.user.id):
                    results = '\n'.join(result[client_.user.id])
                    await reply(message, client, f"[{name(client_.user)}] {results}")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['status'].split(','):
        # Set the client's presence/status text; re-applying the current party
        # privacy pushes the updated presence out.
        try:
            client.status = rawcontent
            await reply(message, client, l('set_to', l('status'), rawcontent))
            await client.party.set_privacy(client.party.privacy)
        except IndexError:
            # NOTE(review): nothing above indexes args, so this handler looks
            # unreachable — confirm.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['status']}] [{l('content')}]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['banner'].split(','):
        # Set banner icon (args[1]) and color (args[2]); the current season
        # level (banner[2]) is preserved.
        try:
            await client.party.me.edit_and_keep(partial(client.party.me.set_banner,args[1],args[2],client.party.me.banner[2]))
            await reply(message, client, l('set_to', l('banner'), f"{args[1]}, {args[2]}"))
        except fortnitepy.HTTPException:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error_while_changing_asset'))
        except IndexError:
            # Missing banner id / color argument; show usage.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['banner']}] [{l('bannerid')}] [{l('color')}]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['level'].split(','):
        # Set the displayed season level (args[1]); banner icon and color
        # (banner[0], banner[1]) are preserved.
        try:
            await client.party.me.edit_and_keep(partial(client.party.me.set_banner,client.party.me.banner[0],client.party.me.banner[1],int(args[1])))
            await reply(message, client, l('set_to', l('level'), args[1]))
        except fortnitepy.HTTPException:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error_while_changing_asset'))
        except ValueError:
            # args[1] was not an integer.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('must_be_int'))
        except IndexError:
            # No level argument supplied; show usage.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['level']}] [{l('level')}]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['bp'].split(','):
        # Set battle pass info: tier (args[1]), XP boost (args[2]) and
        # friend XP boost (args[3]); True = battle pass owned.
        try:
            await client.party.me.edit_and_keep(partial(client.party.me.set_battlepass_info,True,args[1],args[2],args[3]))
            await reply(message, client, l('set_to', l('bpinfo'), f"{l('tier')}: {args[1]}, {l('xpboost')}: {args[2]}, {l('friendxpboost')}: {args[3]}"))
        except fortnitepy.HTTPException:
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error_while_changing_bpinfo'))
        except IndexError:
            # One or more of the three arguments missing; show usage.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['bp']}] [{l('tier')}] [{l('xpboost')}] [{l('friendxpboost')}]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
    elif args[0] in commands['privacy'].split(','):
        # Change party privacy. args[1] is matched against the five privacy
        # command aliases; the fortnitepy.PartyPrivacy constant is derived
        # from the key name (e.g. "privacy_public" -> PartyPrivacy.PUBLIC).
        try:
            privacies = [
                "privacy_public",
                "privacy_friends_allow_friends_of_friends",
                "privacy_friends",
                "privacy_private_allow_friends_of_friends",
                "privacy_private"
            ]
            for privacy in privacies:
                if args[1] in commands[privacy].split(','):
                    await client.party.set_privacy(getattr(fortnitepy.PartyPrivacy,privacy.replace("privacy_","",1).upper()))
                    await reply(message, client, l('set_to', l('privacy'), l(privacy.replace("privacy_","",1))))
            # NOTE(review): an unrecognized args[1] falls through silently with
            # no reply — confirm whether a usage message is wanted here.
        except fortnitepy.Forbidden:
            # Only the party leader may change privacy.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('not_party_leader'))
        except IndexError:
            # No privacy argument supplied; show usage listing all aliases.
            if data['loglevel'] == 'debug':
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, f"[{commands['privacy']}] [[{commands['privacy_public']}] / [{commands['privacy_friends_allow_friends_of_friends']}] / [{commands['privacy_friends']}] / [{commands['privacy_private_allow_friends_of_friends']}] / [{commands['privacy_private']}]]")
        except Exception:
            send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            await reply(message, client, l('error'))
elif args[0] in commands['getuser'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getuser']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id}
try:
user = await client.fetch_profile(rawcontent)
if user:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
text += f'\n{name(user)}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getfriend'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
friend = client.get_friend(user.id)
if not friend:
return
if not friend.nickname:
text += f'\n{str(friend.display_name)} / {friend.id}'
else:
text += f'\n{friend.nickname}({str(friend.display_name)}) / {friend.id}'
if friend.last_logout:
text += "\n{1}: {0.year}/{0.month}/{0.day} {0.hour}:{0.minute}:{0.second}".format(friend.last_logout, l('lastlogin'))
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getpending'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getpending']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.is_pending(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
pending = client.get_pending_friend(user.id)
if not pending:
return
text += f'\n{str(pending.display_name)} / {pending.id}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getblock'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getblock']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_blocked(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_blocked(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.is_blocked(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
block = client.get_blocked_user(user.id)
if not block:
return
text += f'\n{str(block.display_name)} / {block.id}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['info'].split(','):
try:
if args[1] in commands['info_party'].split(','):
text = str()
text += f"{client.party.id}\n{l('member_count')}: {client.party.member_count}\n{client.party.playlist_info[0]}"
for member in client.party.members.copy().values():
client.add_cache(member)
if data['loglevel'] == 'normal':
text += f'\n{str(member.display_name)}'
else:
text += f'\n{str(member.display_name)} / {member.id}'
send(display_name,text)
await reply(message, client, text)
if data['loglevel'] == 'debug':
send(display_name,json.dumps(client.party.meta.schema,indent=4),yellow,add_d=lambda x:f'```\n{x}\n```')
elif True in [args[1] in commands[key].split(',') for key in ("cid", "bid", "petcarrier", "pickaxe_id", "eid", "emoji_id", "toy_id", "id")]:
type_ = convert_to_type(args[1])
if rawcontent2 == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [ID]")
return
result = await loop.run_in_executor(None, search_item, data["search-lang"], "id", rawcontent2, type_)
if not result and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "id", rawcontent2, type_)
if not result:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
await reply(message, client, f"{convert_backend_type(result[0]['backendType'])}: {result[0]['name']} | {result[0]['id']}\n{result[0]['description']}\n{result[0]['rarity']}\n{result[0]['set']}")
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_show_info')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
await reply(message, client, f"{convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}\n{item['description']}\n{item['rarity']}\n{item['set']}")""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
elif True in [args[1] in commands[key].split(',') for key in ("outfit", "backpack", "pet", "pickaxe", "emote", "emoji", "toy", "item")]:
type_ = convert_to_type(args[1])
if rawcontent2 == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [{l('itemname')}]")
return
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", rawcontent2, type_)
if not result and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "name", rawcontent2, type_)
if not result:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
await reply(message, client, f"{convert_backend_type(result[0]['backendType'])}: {result[0]['name']} | {result[0]['id']}\n{result[0]['description']}\n{result[0]['rarity']}\n{result[0]['set']}")
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_show_info')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
await reply(message, client, f"{convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}\n{item['description']}\n{item['rarity']}\n{item['set']}")""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['info']}] [[{commands['info_party']}] / [{commands['item']}] / [{commands['id']}] / [{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}] / [{commands['emote']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['pending'].split(','):
try:
pendings = []
for pending in client.pending_friends.values():
client.add_cache(pending)
if pending.incoming:
pendings.append(pending)
if args[1] in commands['true'].split(','):
for pending in pendings:
try:
await pending.accept()
await reply(message, client, l('add_friend', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
elif args[1] in commands['false'].split(','):
for pending in pendings:
try:
await pending.decline()
await reply(message, client, l('friend_request_decline', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_declining_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['pending']}] [[{commands['true']}] / [{commands['false']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removepending'].split(','):
try:
pendings = []
for pending in client.pending_friends.values():
client.add_cache(pending)
if pending.outgoing:
pendings.append(pending)
for pending in pendings:
try:
await pending.cancel()
await reply(message, client, l('remove_pending', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addfriend'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and not client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and not client.has_friend(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if not client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache( user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if client.has_friend(user.id):
await reply(message, client, l('already_friend'))
return
await client.add_friend(user.id)
await reply(message, client, l('friend_request_to', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if client.has_friend(user.id):
await reply(message, client, l('already_friend'))
return
await client.add_friend(user.id)
await reply(message, client, l('friend_request_to', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_send_friendrequest')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removefriend'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removefriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if not client.has_friend(user.id):
await reply(message, client, l('not_friend_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('remove_friend', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if not client.has_friend(user.id):
await reply(message, client, l('not_friend_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('remove_friend', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend')""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_friend')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeallfriend'].split(','):
try:
friend_count = len(client.friends)
await client.remove_all_friends()
await reply(message, client, l('remove_allfriend',friend_count))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend'))
except Exception:
send(name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
        # Accept an incoming friend request by display name or account ID.
        elif args[0] in commands['acceptpending'].split(','):
            try:
                if rawcontent == '':
                    # No argument: show usage.
                    await reply(message, client, f"[{commands['acceptpending']}] [{l('name_or_id')}]")
                    return
                # Candidates: cached users whose display name contains the
                # search text and who have a pending request with us.
                if data['caseinsensitive']:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
                else:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
                # Exact lookup too (handles account IDs / uncached names).
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if client.is_pending(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    if not client.is_pending(user.id):
                        await reply(message, client, l('not_pending_with_user'))
                        return
                    await client.accept_friend(user.id)
                    await reply(message, client, l('friend_add', f'{name(user)}'))
                else:
                    # Multiple matches: queue one exec-snippet per candidate;
                    # the author then replies with a number to pick one.
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    if not client.is_pending(user.id):
        await reply(message, client, l('not_pending_with_user'))
        return
    await client.accept_friend(user.id)
    await reply(message, client, l('friend_add', f'{name(user)}'))
except fortnitepy.HTTPException:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('error_while_accepting_friendrequest'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_accept_pending')}"
                    await reply(message, client, text)
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_accepting_friendrequest'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Decline an incoming friend request; same search flow as accept.
        elif args[0] in commands['declinepending'].split(','):
            try:
                if rawcontent == '':
                    await reply(message, client, f"[{commands['declinepending']}] [{l('name_or_id')}]")
                    return
                if data['caseinsensitive']:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
                else:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if client.is_pending(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    # NOTE(review): locale key 'nor_pending_with_user' differs from
                    # 'not_pending_with_user' used by the accept branch above —
                    # possible typo; confirm which key exists in the locale files.
                    if not client.is_pending(user.id):
                        await reply(message, client, l('nor_pending_with_user'))
                        return
                    await client.remove_or_decline_friend(user.id)
                    await reply(message, client, l('friend_request_decline', f'{name(user)}'))
                else:
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    if not client.is_pending(user.id):
        await reply(message, client, l('nor_pending_with_user'))
        return
    await client.remove_or_decline_friend(user.id)
    await reply(message, client, l('friend_request_decline', f'{name(user)}'))
except fortnitepy.HTTPException:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('error_while_declining_friendrequest'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_decline_pending')}"
                    await reply(message, client, text)
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_declining_friendrequest'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Block a user (candidates are users NOT already blocked).
        elif args[0] in commands['blockfriend'].split(','):
            try:
                if rawcontent == '':
                    await reply(message, client, f"[{commands['blockfriend']}] [{l('name_or_id')}]")
                    return
                if data['caseinsensitive']:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and not client.is_blocked(user.id)}
                else:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and not client.is_blocked(user.id)}
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if not client.is_blocked(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    if client.is_blocked(user.id):
                        await reply(message, client, l('already_block'))
                        return
                    await client.block_user(user.id)
                    await reply(message, client, l('block_user', f'{name(user)}'))
                else:
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    if client.is_blocked(user.id):
        await reply(message, client, l('already_block'))
        return
    await client.block_user(user.id)
    await reply(message, client, l('block_user', f'{name(user)}'))
except fortnitepy.HTTPException:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('error_while_blocking_user'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_block_user')}"
                    await reply(message, client, text)
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_blocking_user'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Unblock a user (candidates are users currently blocked).
        elif args[0] in commands['unblockfriend'].split(','):
            try:
                if rawcontent == '':
                    await reply(message, client, f"[{commands['unblockfriend']}] [{l('name_or_id')}]")
                    return
                if data['caseinsensitive']:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_blocked(user.id)}
                else:
                    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_blocked(user.id)}
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if client.is_blocked(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    if not client.is_blocked(user.id):
                        await reply(message, client, l('not_block'))
                        return
                    await client.unblock_user(user.id)
                    await reply(message, client, l('unblock_user', f'{name(user)}'))
                else:
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    if not client.is_blocked(user.id):
        await reply(message, client, l('not_block'))
        return
    await client.unblock_user(user.id)
    await reply(message, client, l('unblock_user', f'{name(user)}'))
except fortnitepy.HTTPException:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('error_while_unblocking_user'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_unblock_user')}"
                    await reply(message, client, text)
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_unblocking_user'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Chat-ban a current party member; optional reason after ' : '.
        elif args[0] in commands['chatban'].split(','):
            try:
                # reason[0] is the name part, reason[1] (if present) the reason.
                reason = rawcontent.split(' : ')
                if rawcontent == '':
                    await reply(message, client, f"[{commands['chatban']}] [{l('name_or_id')}] : [{l('reason')}({l('optional')})]")
                    return
                # Candidates are current party members matching the search text.
                if data['caseinsensitive']:
                    users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
                else:
                    users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if client.party.members.get(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    member = client.party.members.get(user.id)
                    if not member:
                        await reply(message, client, l('user_not_in_party'))
                        return
                    # IndexError means no ' : reason' part was supplied.
                    try:
                        await member.chatban(reason[1])
                    except IndexError:
                        await member.chatban()
                    await reply(message, client, l('chatban_user', f'{name(user)}'))
                else:
                    # Multiple matches: queue one exec-snippet per candidate.
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    member = client.party.members.get(user.id)
    if not member:
        await reply(message, client, l('user_not_in_party'))
        return
    try:
        await member.chatban(reason[1])
    except IndexError:
        await member.chatban()
    await reply(message, client, l('chatban_user', f'{name(user)}'))
except fortnitepy.Forbidden:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('nor_party_leader'))
except fortnitepy.NotFound:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('user_notfound'))
except ValueError:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('already_chatban'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user, "reason": reason} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_chatban')}"
                    await reply(message, client, text)
            # NOTE(review): locale key 'nor_party_leader' here differs from
            # 'not_party_leader' used by the promote/kick branches — possible
            # typo; confirm against the locale files.
            except fortnitepy.Forbidden:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('nor_party_leader'))
            except fortnitepy.NotFound:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('user_notfound'))
            except ValueError:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('already_chatban'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Promote a party member to party leader.
        elif args[0] in commands['promote'].split(','):
            try:
                if rawcontent == '':
                    await reply(message, client, f"[{commands['promote']}] [{l('name_or_id')}]")
                    return
                if data['caseinsensitive']:
                    users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
                else:
                    users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
                try:
                    user = await client.fetch_profile(rawcontent)
                    if user:
                        if client.party.members.get(user.id):
                            users[str(user.display_name)] = user
                        client.add_cache(user)
                except fortnitepy.HTTPException:
                    if data['loglevel'] == 'debug':
                        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    await reply(message, client, l("error_while_requesting_userinfo"))
                if len(users) > search_max:
                    await reply(message, client, l('too_many_users', str(len(users))))
                    return
                if len(users) == 0:
                    await reply(message, client, l('user_notfound'))
                    return
                if len(users) == 1:
                    user = tuple(users.values())[0]
                    member = client.party.members.get(user.id)
                    if not member:
                        await reply(message, client, l('user_not_in_party'))
                        return
                    await member.promote()
                    await reply(message, client, l('promote_user', f'{name(user)}'))
                else:
                    client.select[message.author.id] = {
                        "exec": [
                            """\
try:
    member = client.party.members.get(user.id)
    if not member:
        await reply(message, client, l('user_not_in_party'))
        return
    await member.promote()
    await reply(message, client, l('promote_user', f'{name(user)}'))
except fortnitepy.Forbidden:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('already_party_leader'))
except fortnitepy.HTTPException:
    if data['loglevel'] == 'debug':
        send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
    await reply(message, client, l('error_while_promoting_party_leader'))""" for user in users.values()
                        ],
                        "variable": [
                            {"user": user} for user in users.values()
                        ]
                    }
                    text = str()
                    for count, user in enumerate(users.values()):
                        text += f"\n{count+1} {name(user)}"
                    text += f"\n{l('enter_to_promote_user')}"
                    await reply(message, client, text)
            except fortnitepy.Forbidden:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('not_party_leader'))
            except fortnitepy.PartyError:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('already_party_leader'))
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_promoting_party_leader'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
elif args[0] in commands['kick'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['kick']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.party.members.get(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.kick()
await reply(message, client, l('kick_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.kick()
await reply(message, client, l('kick_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('cant_kick_yourself'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_kicking_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_kick_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('cant_kick_yourself'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_kicking_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['hide'].split(','):
try:
if rawcontent == '':
await client.hide()
await reply(message, client, l('hide_all_user'))
else:
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.party.members.get(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.hide(member.id)
await reply(message, client, l('hide_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.hide(member.id)
await reply(message, client, l('hide_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_hide_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['show'].split(','):
try:
if rawcontent == '':
await client.show()
await reply(message, client, l('show_all_user'))
else:
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.party.members.get(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.show(member.id)
await reply(message, client, l('show_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.show(member.id)
await reply(message, client, l('show_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_show_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
        # Set the bot's own party ready-state to READY.
        elif args[0] in commands['ready'].split(','):
            try:
                await client.party.me.set_ready(fortnitepy.ReadyState.READY)
                await reply(message, client, l('set_to', l('readystate'), l('ready')))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Set the bot's own party ready-state to NOT_READY.
        elif args[0] in commands['unready'].split(','):
            try:
                await client.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
                await reply(message, client, l('set_to', l('readystate'), l('unready')))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Set the bot's own party ready-state to SITTING_OUT.
        elif args[0] in commands['sitout'].split(','):
            try:
                await client.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
                await reply(message, client, l('set_to', l('readystate'), l('sitout')))
            # NOTE(review): Forbidden is only logged in debug mode and no reply
            # is sent to the user here, unlike sibling branches — confirm this
            # silence is intentional.
            except fortnitepy.Forbidden:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Pretend to be in a match; optional arg is the players-left count
        # (defaults to 100). int() failure falls through to ValueError below.
        elif args[0] in commands['match'].split(','):
            try:
                await client.party.me.set_in_match(players_left=int(args[1]) if args[1:2] else 100)
                await reply(message, client, l('set_to', l('matchstate'), l('remaining', args[1] if args[1:2] else "100")))
            except ValueError:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('remaining_must_be_between_0_and_255'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Leave the simulated match state.
        elif args[0] in commands['unmatch'].split(','):
            try:
                await client.party.me.clear_in_match()
                await reply(message, client, l('set_to', l('matchstate'), l('off')))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
elif args[0] in commands['swap'].split(','):
try:
if rawcontent == '':
await reply(message, client, f"[{commands['swap']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members.values() if content_ in str(member.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
if client.party.members.get(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.swap_position()
await reply(message, client, l('swap_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.members.get(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.swap_position()
await reply(message, client, l('swap_user', f'{name(user)}}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_swapping_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_swap_user')}"
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_swapping_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
        # Stop the current emote and interrupt any running all-items loop
        # (those loops poll client.stopcheck).
        elif args[0] in commands['stop'].split(','):
            try:
                client.stopcheck = True
                # change_asset returns falsy when the asset type is locked.
                if await client.change_asset(message.author.id, "Emote", ""):
                    await reply(message, client, l('stopped'))
                else:
                    await reply(message, client, l('locked'))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Set enlightenment values (two numbers) on the current outfit.
        elif args[0] in commands['setenlightenment'].split(','):
            try:
                if await client.change_asset(message.author.id, "Outfit", client.party.me.outfit, client.party.me.outfit_variants,(args[1],args[2])) is True:
                    await reply(message, client, l('set_to', 'enlightenment', f'{args[1]}, {args[2]}'))
                else:
                    await reply(message, client, l('locked'))
            except fortnitepy.HTTPException:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error_while_changing_asset'))
            # Fewer than two numeric arguments supplied: show usage.
            except IndexError:
                if data['loglevel'] == 'debug':
                    send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, f"[{commands['setenlightenment']}] [{l('number')}] [{l('number')}]")
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Cycle through newly added cosmetics fetched from the BenBot API.
        # NOTE(review): benbotfn.tk availability is external — confirm the
        # endpoint is still reachable.
        elif args[0] in commands['addeditems'].split(','):
            try:
                async with aiohttp.ClientSession() as session:
                    res = await session.get("https://benbotfn.tk/api/v1/newCosmetics")
                    res = await res.json()
                items = res["items"]
                for item in items:
                    # 'stop' command sets stopcheck to abort the loop.
                    if client.stopcheck:
                        client.stopcheck = False
                        break
                    if convert_backend_type(item["backendType"]) in ignoretype:
                        continue
                    await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item["id"])
                    await asyncio.sleep(5)
                else:
                    # for/else: only reached when the loop was not broken.
                    await reply(message, client, l('all_end', l('addeditem')))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
        # Cycle through every item of one cosmetic type from the bundled
        # items/<type>_<lang>.json files.
        elif True in [args[0] in commands[key].split(',') for key in ("alloutfit", "allbackpack", "allpet", "allpickaxe", "allemote", "allemoji", "alltoy")]:
            type_ = convert_to_type(args[0])
            try:
                flag = False
                # Respect per-type lock settings unless the author is allowed.
                if getattr(client,f"{convert_to_old_type(type_)}lock"):
                    flag = client.lock_check(message.author.id)
                if flag:
                    await reply(message, client, l('locked'))
                    return
                with open(f'items/{type_}_{data["search-lang"]}.json', 'r', encoding='utf-8') as f:
                    allitem = json.load(f)
                for item in allitem:
                    if client.stopcheck:
                        client.stopcheck = False
                        break
                    await client.change_asset(message.author.id, type_, item["id"])
                    await asyncio.sleep(2)
                else:
                    await reply(message, client, l('all_end', l(convert_to_old_type(type_))))
            except Exception:
                send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                await reply(message, client, l('error'))
elif True in [args[0] in commands[key].split(',') for key in ("cid", "bid", "petcarrier", "pickaxe_id", "eid", "emoji_id", "toy_id", "id")]:
type_ = convert_to_type(args[0])
if rawcontent == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [ID]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "id", rawcontent, type_)
if result is None and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "id", rawcontent, type_)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]['backendType']), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item['backendType']), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif True in [args[0] in commands[key].split(',') for key in ("outfit", "backpack", "pet", "pickaxe", "emote", "emoji", "toy", "item")]:
type_ = convert_to_type(args[0])
if rawcontent == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [{l('itemname')}]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", rawcontent, type_)
if result is None and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "name", rawcontent, type_)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]['backendType']), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item['backendType']), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['set'].split(','):
if rawcontent == '':
await reply(message, client, f"[{commands['set']}] [{l('setname')}]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "set", rawcontent)
if result is None and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "set", rawcontent)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]["backendType"]), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['set']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}({result[0]['set']})")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {result[0]['set']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}({result[0]['set']})"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['set']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}({item['set']})")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item}
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['setstyle'].split(','):
try:
if True not in [args[1] in commands[key].split(',') for key in ("outfit", "backpack", "pickaxe")]:
await reply(message, client, f"[{commands['setstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}]]")
return
type_ = convert_to_asset(args[1])
id_ = member_asset(client.party.me, type_)
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
result = await loop.run_in_executor(None, search_style, data["search-lang"], id_, type_)
if result is None:
await reply(message, client, l('no_stylechange'))
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {item['name']}"
text += f"\n{l('enter_to_set_style')}"
await reply(message, client, text)
client.select[message.author.id] = {"exec": [f"await client.change_asset('{message.author.id}', '{type_}', '{id_}', {variants['variants']})" for variants in result]}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['setstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addstyle'].split(','):
try:
if True not in [args[1] in commands[key].split(',') for key in ("outfit", "backpack", "pickaxe")]:
await reply(message, client, f"[{commands['addstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}]]")
return
type_ = convert_to_asset(args[1])
id_ = member_asset(client.party.me, type_)
variants_ = eval(f"client.party.me.{type_}_variants")
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
result = await loop.run_in_executor(None, search_style, data["search-lang"], id_, type_)
if result is None:
await reply(message, client, l('no_stylechange'))
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {item['name']}"
text += f"\n{l('enter_to_set_style')}"
await reply(message, client, text)
client.select[message.author.id] = {"exec": [f"await client.change_asset('{message.author.id}', '{type_}', '{id_}', {variants_} + {variants['variants']})" for variants in result]}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['addstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['setvariant'].split(','):
try:
if True not in [args[1] in commands[key].split(',') for key in ("outfit", "backpack", "pet", "pickaxe")]:
await reply(message, client, f"[{commands['setvariant']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
return
variantdict={}
for count,text in enumerate(args[2:]):
if count % 2 != 0:
continue
try:
variantdict[text]=args[count+3]
except IndexError:
break
type_ = convert_to_type(args[1])
id_ = member_asset(client.party.me, convert_to_asset(args[1]))
variants = client.party.me.create_variants(item='AthenaCharacter',**variantdict)
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
if await client.change_asset(message.author.id, type_, id_, variants, client.party.me.enlightenments) is False:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['setvariant']}] [ID] [variant] [{l('number')}]")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addvariant'].split(','):
try:
if True not in [args[1] in commands[key].split(',') for key in ("outfit", "backpack", "pet", "pickaxe")]:
await reply(message, client, f"[{commands['addvariant']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
return
variantdict={}
for count,text in enumerate(args[2:]):
if count % 2 != 0:
continue
try:
variantdict[text]=args[count+3]
except IndexError:
break
type_ = convert_to_type(args[1])
id_ = member_asset(client.party.me, convert_to_asset(args[1]))
variants = client.party.me.create_variants(item='AthenaCharacter',**variantdict)
variants += eval(f"client.party.me.{convert_to_asset(args[1])}_variants")
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
if await client.change_asset(message.author.id, type_, id_, variants, client.party.me.enlightenments) is False:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['addvariant']}] [ID] [variant] [{l('number')}]")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif True in [args[0] in commands[key].split(',') for key in ("outfitasset", "backpackasset", "pickaxeasset", "emoteasset")]:
type_ = convert_to_type(args[0])
try:
if rawcontent == '':
await reply(message, client, f"[{commands[f'{convert_to_old_type(type_)}asset']}] [{l('assetpath')}]")
return
if await client.change_asset(message.author.id, type_, rawcontent) is False:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif True in [args[0].lower().startswith(id_) for id_ in ("cid_", "bid_", "petcarrier_", "pickaxe_id_", "eid_", "emoji_", "toy_")]:
try:
type_ = convert_to_type(args[0])
if not await client.change_asset(message.author.id, type_, args[0]):
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0].lower().startswith('playlist_'):
try:
await client.party.set_playlist(args[0])
await reply(message, client, l('set_playlist', args[0]))
data['fortnite']['playlist']=args[0]
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
else:
keys = {
"outfitmimic": ["outfitmimic", l('mimic', l("outfit"))],
"backpackmimic": ["backpackmimic", l('mimic', l("backpack"))],
"pickaxemimic": ["pickaxemimic", l('mimic', l("pickaxe"))],
"emotemimic": ["emotemimic", l('mimic', l("emote"))]
}
for key,value in keys.items():
if args[0] in commands[key].split(','):
try:
if args[1] in commands['true'].split(','):
setattr(client,value[0],True)
send(display_name,l('set_to', value[1], l('on')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('on')))
elif args[1] in commands['false'].split(','):
setattr(client,value[0],False)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('off')))
else:
if data['caseinsensitive']:
users = {str(user.display_name): user for user in client.party.members.values() if content_ in jaconv.kata2hira(str(user.display_name).lower())}
else:
users = {str(user.display_name): user for user in client.party.members.values() if content_ in str(user.display_name)}
try:
user = await client.fetch_profile(rawcontent)
if user:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
setattr(client,value[0],user.id)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], name(user)))
else:
client.select[message.author.id] = {
"exec": [
"""\
setattr(client,value[0],user.id)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], name(user)))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_mimic_user')}"
await reply(message, client, text)
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands[key]}] [[{commands['true']}] / [{commands['false']}] / {l('name_or_id')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
keys = {
"outfitlock": ["outfitlock", l('lock', l("outfit"))],
"backpacklock": ["backpacklock", l('lock', l("backpack"))],
"pickaxelock": ["pickaxelock", l('lock', l("pickaxe"))],
"emotelock": ["emotelock", l('lock', l("emote"))],
"whisper": ["whisper", l('command_from', l('whisper'))],
"partychat": ["partychat", l('command_from', l('partychat'))],
"discord": ["discord", l('command_from', l('discord'))],
"web": ["web", l('command_from', l('web'))],
"disablewhisperperfectly": ["whisperperfect", l('disable_perfect', l('whisper'))],
"disablepartychatperfectly": ["partychatperfect", l('disable_perfect', l('partychat'))],
"disablediscordperfectly": ["discordperfect", l('disable_perfect', l('discord'))],
"acceptinvite": ["acceptinvite", l('invite')],
"acceptfriend": ["acceptfriend", l('friend_request')],
"joinmessageenable": ["joinmessageenable", l('join_', l('message'))],
"randommessageenable": ["randommessageenable", l('join_', l('randommessage'))]
}
for key,value in keys.items():
if args[0] in commands[key].split(','):
try:
if args[1] in commands['true'].split(','):
setattr(client,value[0],True)
send(display_name,l('set_to', value[1], l('on')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('on')))
elif args[1] in commands['false'].split(','):
setattr(client,value[0],False)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('off')))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['randommessageenable']}] [[{commands['true']}] / [{commands['false']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
if ': ' in message.content:
return
if content.isdigit() and client.select.get(message.author.id):
try:
if int(args[0]) == 0:
await reply(message, client, l('please_enter_valid_number'))
return
exec_ = client.select[message.author.id]["exec"][int(args[0])-1]
variable = globals()
variable.update(locals())
if client.select[message.author.id].get("variable"):
variable.update(client.select[message.author.id]["variable"][int(args[0])-1])
await aexec(exec_, variable)
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('please_enter_valid_number'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
else:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", content, "Item")
if not result and data["search-lang"] != "en":
result = await loop.run_in_executor(None, search_item, "en", "name", content, "Item")
if result:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]["backendType"]), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
# ---- Module-level runtime state shared across the bot --------------------
# NOTE(review): these names are read and mutated elsewhere in this file;
# names and initial values must stay exactly as they are.

bot_ready = True    # lifecycle flag (toggled elsewhere in the file)
first_boot = True   # presumably True only until the first boot completes — TODO confirm
filename = 'device_auths.json'   # device-auth storage file name
web_text = ''                    # text buffer shown by the web interface

# Independent per-kind caches (distinct dict objects).
cache_users, cache_items, cache_banners, client_name = {}, {}, {}, {}

# Cosmetic backend types skipped by the item-cycling commands
# (checked via `convert_backend_type(...) in ignoretype`).
ignoretype = [
    "Contrail",
    "Glider",
    "Wrap",
    "Loading Screen",
    "Music",
    "Spray",
    "Battle Bus",
]

# Client registries and access-control lists (eight distinct list objects).
(clients, loadedclients,
 whitelist, whitelist_,
 blacklist, blacklist_,
 otherbotlist, storedlogs) = ([] for _ in range(8))
# Schema for the bot's config file: maps a config key path (written as the
# literal subscript expression, e.g. "['fortnite']['email']") to a list of
#   [expected_type, optional_tag]
# where expected_type is a Python type (or the custom bool_/bool_none
# validators defined elsewhere in this file) and the optional tag
# ("can_be_multiple", "select_bool", "select_lang", ...) presumably drives
# the interactive config editor / web UI — TODO confirm against the consumer.
config_tags={
    # -- Fortnite account / party settings ---------------------------------
    "['fortnite']": [dict],
    "['fortnite']['email']": [str,"can_be_multiple"],
    "['fortnite']['owner']": [str],
    "['fortnite']['platform']": [str,"select_platform"],
    "['fortnite']['cid']": [str],
    "['fortnite']['bid']": [str],
    "['fortnite']['pickaxe_id']": [str],
    "['fortnite']['eid']": [str],
    "['fortnite']['playlist']": [str],
    "['fortnite']['banner']": [str],
    "['fortnite']['banner_color']": [str],
    "['fortnite']['avatar_id']": [str],
    "['fortnite']['avatar_color']": [str,"can_be_multiple"],
    "['fortnite']['level']": [int],
    "['fortnite']['tier']": [int],
    "['fortnite']['xpboost']": [int],
    "['fortnite']['friendxpboost']": [int],
    "['fortnite']['status']": [str],
    "['fortnite']['privacy']": [str,"select_privacy"],
    # Command-source toggles and chat behaviour.
    "['fortnite']['whisper']": [bool_,"select_bool"],
    "['fortnite']['partychat']": [bool_,"select_bool"],
    "['fortnite']['disablewhisperperfectly']": [bool_,"select_bool"],
    "['fortnite']['disablepartychatperfectly']": [bool_,"select_bool"],
    "['fortnite']['ignorebot']": [bool_,"select_bool"],
    "['fortnite']['joinmessage']": [str],
    "['fortnite']['randommessage']": [str,"can_be_multiple"],
    "['fortnite']['joinmessageenable']": [bool_,"select_bool"],
    "['fortnite']['randommessageenable']": [bool_,"select_bool"],
    "['fortnite']['joinemote']": [bool_,"select_bool"],
    # Mimic settings (copying party members' cosmetics).
    "['fortnite']['outfitmimic']": [bool_,"select_bool"],
    "['fortnite']['backpackmimic']": [bool_,"select_bool"],
    "['fortnite']['pickaxemimic']": [bool_,"select_bool"],
    "['fortnite']['emotemimic']": [bool_,"select_bool"],
    "['fortnite']['mimic-ignorebot']": [bool_,"select_bool"],
    "['fortnite']['mimic-ignoreblacklist']": [bool_,"select_bool"],
    # Invite / friend handling.
    "['fortnite']['acceptinvite']": [bool_,"select_bool"],
    "['fortnite']['acceptfriend']": [bool_none,"select_bool_none"],
    "['fortnite']['addfriend']": [bool_,"select_bool"],
    "['fortnite']['invite-ownerdecline']": [bool_,"select_bool"],
    "['fortnite']['inviteinterval']": [bool_,"select_bool"],
    "['fortnite']['interval']": [int],
    "['fortnite']['waitinterval']": [int],
    # Visibility of users in listings.
    "['fortnite']['hide-user']": [bool_,"select_bool"],
    "['fortnite']['hide-blacklist']": [bool_,"select_bool"],
    "['fortnite']['show-owner']": [bool_,"select_bool"],
    "['fortnite']['show-whitelist']": [bool_,"select_bool"],
    "['fortnite']['show-bot']": [bool_,"select_bool"],
    # Black/white/invite lists and their enforcement flags.
    "['fortnite']['blacklist']": [list,"can_be_multiple"],
    "['fortnite']['blacklist-declineinvite']": [bool_,"select_bool"],
    "['fortnite']['blacklist-autoblock']": [bool_,"select_bool"],
    "['fortnite']['blacklist-autokick']": [bool_,"select_bool"],
    "['fortnite']['blacklist-autochatban']": [bool_,"select_bool"],
    "['fortnite']['blacklist-ignorecommand']": [bool_,"select_bool"],
    "['fortnite']['whitelist']": [list,"can_be_multiple"],
    "['fortnite']['whitelist-allowinvite']": [bool_,"select_bool"],
    "['fortnite']['whitelist-declineinvite']": [bool_,"select_bool"],
    "['fortnite']['whitelist-ignorelock']": [bool_,"select_bool"],
    "['fortnite']['whitelist-ownercommand']": [bool_,"select_bool"],
    "['fortnite']['whitelist-ignoreng']": [bool_,"select_bool"],
    "['fortnite']['invitelist']": [list,"can_be_multiple"],
    "['fortnite']['otherbotlist']": [list,"can_be_multiple"],
    # -- Discord integration ----------------------------------------------
    "['discord']": [dict],
    "['discord']['enabled']": [bool_,"select_bool"],
    "['discord']['token']": [str],
    "['discord']['owner']": [int],
    "['discord']['channels']": [list,"can_be_multiple"],
    "['discord']['status']": [str],
    "['discord']['discord']": [bool_,"select_bool"],
    "['discord']['disablediscordperfectly']": [bool_,"select_bool"],
    "['discord']['ignorebot']": [bool_,"select_bool"],
    "['discord']['blacklist']": [list,"can_be_multiple"],
    "['discord']['blacklist-ignorecommand']": [bool_,"select_bool"],
    "['discord']['whitelist']": [list,"can_be_multiple"],
    "['discord']['whitelist-ignorelock']": [bool_,"select_bool"],
    "['discord']['whitelist-ownercommand']": [bool_,"select_bool"],
    "['discord']['whitelist-ignoreng']": [bool_,"select_bool"],
    # -- Web interface ----------------------------------------------------
    "['web']": [dict],
    "['web']['enabled']": [bool_,"select_bool"],
    "['web']['ip']": [str],
    "['web']['port']": [int],
    "['web']['password']": [str],
    "['web']['login_required']": [bool_,"select_bool"],
    "['web']['web']": [bool_,"select_bool"],
    "['web']['log']": [bool_,"select_bool"],
    # -- Global bot behaviour ---------------------------------------------
    "['replies-matchmethod']": [str,"select_matchmethod"],
    "['ng-words']": [list,"can_be_multiple"],
    "['ng-word-matchmethod']": [str,"select_matchmethod"],
    "['ng-word-kick']": [bool_,"select_bool"],
    "['ng-word-chatban']": [bool_,"select_bool"],
    "['ng-word-blacklist']": [bool_,"select_bool"],
    "['restart_in']": [int],
    "['search_max']": [int],
    "['lang']": [str,"select_lang"],
    "['search-lang']": [str,"select_ben_lang"],
    "['no-logs']": [bool_,"select_bool"],
    "['ingame-error']": [bool_,"select_bool"],
    "['discord-log']": [bool_,"select_bool"],
    "['omit-over2000']": [bool_,"select_bool"],
    "['skip-if-overflow']": [bool_,"select_bool"],
    "['hide-email']": [bool_,"select_bool"],
    "['hide-token']": [bool_,"select_bool"],
    "['hide-webhook']": [bool_,"select_bool"],
    "['webhook']": [str],
    "['caseinsensitive']": [bool_,"select_bool"],
    "['loglevel']": [str,"select_loglevel"],
    "['debug']": [bool_,"select_bool"]
}
commands_tags={
"['ownercommands']": [str,"can_be_multiple"],
"['true']": [str,"can_be_multiple"],
"['false']": [str,"can_be_multiple"],
"['me']": [str,"can_be_multiple"],
"['prev']": [str,"can_be_multiple"],
"['eval']": [str,"can_be_multiple"],
"['exec']": [str,"can_be_multiple"],
"['restart']": [str,"can_be_multiple"],
"['relogin']": [str,"can_be_multiple"],
"['reload']": [str,"can_be_multiple"],
"['addblacklist']": [str,"can_be_multiple"],
"['removeblacklist']": [str,"can_be_multiple"],
"['addwhitelist']": [str,"can_be_multiple"],
"['removewhitelist']": [str,"can_be_multiple"],
"['addblacklist_discord']": [str,"can_be_multiple"],
"['removeblacklist_discord']": [str,"can_be_multiple"],
"['addwhitelist_discord']": [str,"can_be_multiple"],
"['removewhitelist_discord']": [str,"can_be_multiple"],
"['addinvitelist']": [str,"can_be_multiple"],
"['removeinvitelist']": [str,"can_be_multiple"],
"['get']": [str,"can_be_multiple"],
"['friendcount']": [str,"can_be_multiple"],
"['pendingcount']": [str,"can_be_multiple"],
"['blockcount']": [str,"can_be_multiple"],
"['friendlist']": [str,"can_be_multiple"],
"['pendinglist']": [str,"can_be_multiple"],
"['blocklist']": [str,"can_be_multiple"],
"['outfitmimic']": [str,"can_be_multiple"],
"['backpackmimic']": [str,"can_be_multiple"],
"['pickaxemimic']": [str,"can_be_multiple"],
"['emotemimic']": [str,"can_be_multiple"],
"['whisper']": [str,"can_be_multiple"],
"['partychat']": [str,"can_be_multiple"],
"['discord']": [str,"can_be_multiple"],
"['web']": [str,"can_be_multiple"],
"['disablewhisperperfectly']": [str,"can_be_multiple"],
"['disablepartychatperfectly']": [str,"can_be_multiple"],
"['disablediscordperfectly']": [str,"can_be_multiple"],
"['acceptinvite']": [str,"can_be_multiple"],
"['acceptfriend']": [str,"can_be_multiple"],
"['joinmessageenable']": [str,"can_be_multiple"],
"['randommessageenable']": [str,"can_be_multiple"],
"['wait']": [str,"can_be_multiple"],
"['join']": [str,"can_be_multiple"],
"['joinid']": [str,"can_be_multiple"],
"['leave']": [str,"can_be_multiple"],
"['invite']": [str,"can_be_multiple"],
"['inviteall']": [str,"can_be_multiple"],
"['message']": [str,"can_be_multiple"],
"['partymessage']": [str,"can_be_multiple"],
"['sendall']": [str,"can_be_multiple"],
"['status']": [str,"can_be_multiple"],
"['avatar']": [str,"can_be_multiple"],
"['banner']": [str,"can_be_multiple"],
"['level']": [str,"can_be_multiple"],
"['bp']": [str,"can_be_multiple"],
"['privacy']": [str,"can_be_multiple"],
"['privacy_public']": [str,"can_be_multiple"],
"['privacy_friends_allow_friends_of_friends']": [str,"can_be_multiple"],
"['privacy_friends']": [str,"can_be_multiple"],
"['privacy_private_allow_friends_of_friends']": [str,"can_be_multiple"],
"['privacy_private']": [str,"can_be_multiple"],
"['getuser']": [str,"can_be_multiple"],
"['getfriend']": [str,"can_be_multiple"],
"['getpending']": [str,"can_be_multiple"],
"['getblock']": [str,"can_be_multiple"],
"['info']": [str,"can_be_multiple"],
"['info_party']": [str,"can_be_multiple"],
"['pending']": [str,"can_be_multiple"],
"['removepending']": [str,"can_be_multiple"],
"['addfriend']": [str,"can_be_multiple"],
"['removefriend']": [str,"can_be_multiple"],
"['removeallfriend']": [str,"can_be_multiple"],
"['acceptpending']": [str,"can_be_multiple"],
"['declinepending']": [str,"can_be_multiple"],
"['blockfriend']": [str,"can_be_multiple"],
"['unblockfriend']": [str,"can_be_multiple"],
"['chatban']": [str,"can_be_multiple"],
"['promote']": [str,"can_be_multiple"],
"['kick']": [str,"can_be_multiple"],
"['hide']": [str,"can_be_multiple"],
"['show']": [str,"can_be_multiple"],
"['ready']": [str,"can_be_multiple"],
"['unready']": [str,"can_be_multiple"],
"['sitout']": [str,"can_be_multiple"],
"['match']": [str,"can_be_multiple"],
"['unmatch']": [str,"can_be_multiple"],
"['swap']": [str,"can_be_multiple"],
"['outfitlock']": [str,"can_be_multiple"],
"['backpacklock']": [str,"can_be_multiple"],
"['pickaxelock']": [str,"can_be_multiple"],
"['emotelock']": [str,"can_be_multiple"],
"['stop']": [str,"can_be_multiple"],
"['addeditems']": [str,"can_be_multiple"],
"['alloutfit']": [str,"can_be_multiple"],
"['allbackpack']": [str,"can_be_multiple"],
"['allpet']": [str,"can_be_multiple"],
"['allpickaxe']": [str,"can_be_multiple"],
"['allemote']": [str,"can_be_multiple"],
"['allemoji']": [str,"can_be_multiple"],
"['alltoy']": [str,"can_be_multiple"],
"['cid']": [str,"can_be_multiple"],
"['bid']": [str,"can_be_multiple"],
"['petcarrier']": [str,"can_be_multiple"],
"['pickaxe_id']": [str,"can_be_multiple"],
"['eid']": [str,"can_be_multiple"],
"['emoji_id']": [str,"can_be_multiple"],
"['toy_id']": [str,"can_be_multiple"],
"['id']": [str,"can_be_multiple"],
"['outfit']": [str,"can_be_multiple"],
"['backpack']": [str,"can_be_multiple"],
"['pet']": [str,"can_be_multiple"],
"['pickaxe']": [str,"can_be_multiple"],
"['emote']": [str,"can_be_multiple"],
"['emoji']": [str,"can_be_multiple"],
"['toy']": [str,"can_be_multiple"],
"['item']": [str,"can_be_multiple"],
"['set']": [str,"can_be_multiple"],
"['setvariant']": [str,"can_be_multiple"],
"['addvariant']": [str,"can_be_multiple"],
"['setstyle']": [str,"can_be_multiple"],
"['addstyle']": [str,"can_be_multiple"],
"['setenlightenment']": [str,"can_be_multiple"],
"['outfitasset']": [str,"can_be_multiple"],
"['backpackasset']": [str,"can_be_multiple"],
"['pickaxeasset']": [str,"can_be_multiple"],
"['emoteasset']": [str,"can_be_multiple"]
}
localize_keys = [
'bot',
'lobbybot',
'credit',
'library',
'loglevel',
'normal',
'info',
'debug',
'debug_is_on',
'on',
'off',
'booting',
'get_code',
'authorization_expired',
'waiting_for_authorization',
'account_incorrect',
'login',
'all_login',
'relogin',
'closing',
'owner',
'party',
'userid',
'name_or_id',
'partyid',
'content',
'number',
'eval',
'exec',
'invite_is_decline',
'restarting',
'relogining',
'success',
'accepted_invite_from',
'accepted_invite_from2',
'declined_invite_from',
'declined_invite_from2',
'declined_invite_interval',
'declined_invite_interval2',
'declined_invite_interval3',
'declined_invite_owner',
'declined_invite_owner2',
'declined_invite_owner3',
'declined_invite_whitelist',
'declined_invite_whitelist2',
'declined_invite_whitelist3',
'party_member_joined',
'party_member_left',
'party_member_request',
'party_member_kick',
'party_member_promote',
'party_member_update',
'party_member_disconnect',
'party_member_chatban',
'party_member_chatban2',
'party_update',
'random_message',
'click_invite',
'inviteaccept',
'inviteinterval',
'invite_from',
'invite_from2',
'friend_request_to',
'friend_request_from',
'friend_request_decline',
'friend_accept',
'friend_add',
'friend_remove',
'this_command_owneronly',
'failed_ownercommand',
'error_while_accepting_partyrequest',
'error_while_declining_partyrequest',
'error_while_accepting_friendrequest',
'error_while_declining_friendrequest',
'error_while_sending_friendrequest',
'error_while_removing_friendrequest',
'error_while_removing_friend',
'error_while_accepting_invite',
'error_while_declining_invite',
'error_while_blocking_user',
'error_while_unblocking_user',
'error_while_requesting_userinfo',
'error_while_joining_to_party',
'error_while_leaving_party',
'error_while_sending_partyinvite',
'error_while_changing_asset',
'error_while_changing_bpinfo',
'error_while_promoting_party_leader',
'error_while_kicking_user',
'error_while_swapping_user',
'error_while_setting_client',
'error_already_member_of_party',
'error_netcl_does_not_match',
'error_private_party',
'login_failed',
'failed_to_load_account',
'exchange_code_error',
'password_reset_error',
'api_downing',
'api_downing2',
'not_enough_password',
'owner_notfound',
'discord_owner_notfound',
'blacklist_user_notfound',
'whitelist_user_notfound',
'discord_blacklist_user_notfound',
'discord_whitelist_user_notfound',
'botlist_user_notfound',
'invitelist_user_notfound',
'not_friend_with_owner',
'not_friend_with_inviteuser',
'not_friend_with_user',
'nor_pending_with_user',
'not_party_leader',
'load_failed_keyerror',
'load_failed_json',
'load_failed_notfound',
'is_missing',
'too_many_users',
'too_many_items',
'user_notfound',
'user_not_in_party',
'party_full_or_already_or_offline',
'party_full_or_already',
'party_notfound',
'party_private',
'not_available',
'must_be_int',
'item_notfound',
'error',
'add_to_list',
'already_list',
'remove_from_list',
'not_list',
'enter_to_add_to_list',
'enter_to_remove_from_list',
'blacklist',
'whitelist',
'discord_blacklist',
'discord_whitelist',
'invitelist',
'botlist',
'enter_to_get_userinfo',
'friendcount',
'pendingcount',
'outbound',
'inbound',
'blockcount',
'set_to',
'mimic',
'addeditem',
'outfit',
'backpack',
'pet',
'pickaxe',
'emote',
'emoji',
'toy',
'command_from',
'whisper',
'partychat',
'discord',
'disable_perfect',
'invite',
'accept',
'decline',
'friend_request',
'join_',
'message',
'randommessage',
'decline_invite_for',
'enter_to_join_party',
'party_leave',
'user_invited',
'enter_to_invite_user',
'user_sent',
'enter_to_send',
'party_sent',
'status',
'avatar',
'color_must_be',
'banner',
'bannerid',
'color',
'level',
'bpinfo',
'tier',
'xpboost',
'friendxpboost',
'privacy',
'public',
'friends_allow_friends_of_friends',
'friends',
'private_allow_friends_of_friends',
'private',
'lastlogin',
'member_count',
'enter_to_show_info',
'itemname',
'remove_pending',
'already_friend',
'enter_to_send_friendrequest',
'remove_friend',
'remove_allfriend',
'enter_to_remove_friend',
'enter_to_accept_pending',
'enter_to_decline_pending',
'already_block',
'block_user',
'enter_to_block_user',
'not_block',
'unblock_user',
'enter_to_unblock_user',
'optional',
'reason',
'chatban_user',
'already_chatban',
'enter_to_chatban_user',
'promote_user',
'already_party_leader',
'enter_to_promote_user',
'kick_user',
'cant_kick_yourself',
'enter_to_kick_user',
'hide_user',
'hide_all_user',
'enter_to_hide_user',
'show_user',
'show_all_user',
'enter_to_show_user',
'readystate',
'ready',
'unready',
'sitout',
'matchstate',
'remaining',
'remaining_must_be_between_0_and_255',
'swap_user',
'enter_to_swap_user',
'lock',
'stopped',
'locked',
'all_end',
'enter_to_change_asset',
'setname',
'no_stylechange',
'enter_to_set_style',
'assetpath',
'set_playlist',
'please_enter_valid_number',
'web',
'web_running',
'web_login',
'web_logout',
'web_logged',
'web_not_logged',
'invalid_password',
'main_page',
'config_editor',
'commands_editor',
'replies_editor',
'party_viewer',
'password',
'web_save',
'web_save_reload',
'web_saved',
'web_back',
'account_not_exists',
'account_not_loaded',
'party_moving',
'loading',
'command',
'run',
'result',
'web_notfound',
'web_already_running',
'failed_to_run_web',
'this_field_is_required',
'this_field_fix_required',
'trigger',
'text',
'cannot_be_empty',
'restart',
'config_fortnite_email',
'config_fortnite_owner',
'config_fortnite_platform',
'config_fortnite_cid',
'config_fortnite_bid',
'config_fortnite_pickaxe_id',
'config_fortnite_eid',
'config_fortnite_playlist',
'config_fortnite_banner',
'config_fortnite_banner_color',
'config_fortnite_avatar_id',
'config_fortnite_avatar_color',
'config_fortnite_level',
'config_fortnite_tier',
'config_fortnite_xpboost',
'config_fortnite_friendxpboost',
'config_fortnite_status',
'config_fortnite_privacy',
'config_fortnite_whisper',
'config_fortnite_partychat',
'config_fortnite_disablewhisperperfectly',
'config_fortnite_disablepartychatperfectly',
'config_fortnite_ignorebot',
'config_fortnite_joinmessage',
'config_fortnite_joinmessageenable',
'config_fortnite_randommessage',
'config_fortnite_randommessageenable',
'config_fortnite_joinemote',
'config_fortnite_outfitmimic',
'config_fortnite_backpackmimic',
'config_fortnite_pickaxemimic',
'config_fortnite_emotemimic',
'config_fortnite_mimic-ignorebot',
'config_fortnite_mimic-ignoreblacklist',
'config_fortnite_acceptinvite',
'config_fortnite_acceptfriend',
'config_fortnite_addfriend',
'config_fortnite_invite-ownerdecline',
'config_fortnite_inviteinterval',
'config_fortnite_interval',
'config_fortnite_waitinterval',
'config_fortnite_hide-user',
'config_fortnite_hide-blacklist',
'config_fortnite_show-owner',
'config_fortnite_show-whitelist',
'config_fortnite_show-bot',
'config_fortnite_blacklist',
'config_fortnite_blacklist-declineinvite',
'config_fortnite_blacklist-autoblock',
'config_fortnite_blacklist-autokick',
'config_fortnite_blacklist-autochatban',
'config_fortnite_blacklist-ignorecommand',
'config_fortnite_whitelist',
'config_fortnite_whitelist-allowinvite',
'config_fortnite_whitelist-declineinvite',
'config_fortnite_whitelist-ignorelock',
'config_fortnite_whitelist-ownercommand',
'config_fortnite_whitelist-ignoreng',
'config_fortnite_invitelist',
'config_fortnite_otherbotlist',
'config_discord_enabled',
'config_discord_token',
'config_discord_owner',
'config_discord_channels',
'config_discord_status',
'config_discord_discord',
'config_discord_disablediscordperfectly',
'config_discord_ignorebot',
'config_discord_blacklist',
'config_discord_blacklist-ignorecommand',
'config_discord_whitelist',
'config_discord_whitelist-ignorelock',
'config_discord_whitelist-ownercommand',
'config_discord_whitelist-ignoreng',
'config_web_enabled',
'config_web_ip',
'config_web_port',
'config_web_password',
'config_web_login_required',
'config_web_web',
'config_web_log',
'config_replies-matchmethod',
'config_ng-words',
'config_ng-word-matchmethod',
'config_ng-word-kick',
'config_ng-word-chatban',
'config_ng-word-blacklist',
'config_lang',
'config_restart_in',
'config_search-lang',
'config_no-logs',
'config_ingame-error',
'config_discord-log',
'config_omit-over2000',
'config_hide-email',
'config_hide-token',
'config_hide-webhook',
'config_webhook',
'config_caseinsensitive',
'config_loglevel',
'config_debug',
'bool_true',
'bool_false',
'bool_none',
'boot_switch',
'info_closed',
'info_booting',
'info_ready'
]
# Keys that failed validation — presumably populated while loading the JSON
# config/commands files (populated before the check at module level below);
# TODO confirm against load_config.
error_config = []
error_commands = []
# Command-name groups per cosmetic type: each tuple lists every command key
# that operates on that item category (used to dispatch item commands).
outfit_keys = ("cid", "outfit", "outfitmimic", "outfitlock", "alloutfit", "outfitasset")
backpack_keys = ("bid", "backpack", "backpackmimic", "backpacklock", "allbackpack", "backpackasset")
pet_keys = ("petcarrier", "pet", "allpet")
pickaxe_keys = ("pickaxe_id", "pickaxe", "pickaxemimic", "pickaxelock", "allpickaxe", "pickaxeasset")
emote_keys = ("eid", "emote", "emotemimic", "emotelock", "allemote", "emoteasset")
emoji_keys = ("emoji_id", "emoji", "allemoji")
toy_keys = ("toy_id", "toy", "alltoy")
item_keys = ("id", "item")
# Web application (Sanic) with a random per-run session secret; static images
# are served straight from the template directory.
app = Sanic(__name__)
app.secret_key = os.urandom(32)
app.static('/images', './templates/images')
# Jinja2 environment used by render_template; 'do' extension enables {% do %}.
env = Environment(loader=FileSystemLoader('./templates', encoding='utf8'), extensions=['jinja2.ext.do'])
auth = LoginManager()
# Epic Games OAuth client tokens taken from fortnitepy.
fortnitepy_auth = fortnitepy.Auth()
launcher_token = fortnitepy_auth.ios_token
fortnite_token = fortnitepy_auth.fortnite_token
# Epic account-service endpoints. NOTE(review): oauth_url and
# fortnite_token_url are currently identical — confirm this is intentional.
oauth_url = "https://account-public-service-prod03.ol.epicgames.com/account/api/oauth/token"
fortnite_token_url = "https://account-public-service-prod03.ol.epicgames.com/account/api/oauth/token"
exchange_auth_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/token"
device_auth_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/deviceAuthorization"
exchange_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/exchange"
user_lookup_url = "https://account-public-service-prod.ol.epicgames.com/account/api/public/account/{user_id}"
# Abort startup entirely if the configuration cannot be loaded.
if not load_config():
    sys.exit(1)
# If any config/commands keys failed validation, keep the bot offline and tag
# those keys so the web editors render them as "fix required".
if error_config or error_commands:
    bot_ready = False
    for key in error_config:
        config_tags[key].append("fix_required")
    for key in error_commands:
        commands_tags[key].append("fix_required")
search_max = data["search_max"]
if data['debug']:
    # Attach DEBUG-level stdout handlers to fortnitepy's internal loggers.
    # The three blocks were previously copy-pasted; a single loop keeps the
    # format strings in one place. auth/http log in cyan (ANSI 36), xmpp in
    # magenta (ANSI 35), exactly as before.
    for _logger_name, _color in (('fortnitepy.auth', '36'),
                                 ('fortnitepy.http', '36'),
                                 ('fortnitepy.xmpp', '35')):
        logger = logging.getLogger(_logger_name)
        logger.setLevel(level=logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(f'\u001b[{_color}m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
        logger.addHandler(handler)
# Bind the web server to all interfaces on hosted platforms (Heroku-style
# '/app', GitHub-runner '/home/runner'); otherwise substitute the local IP
# into the configured address template.
if os.getcwd().startswith('/app') or os.getcwd().startswith('/home/runner'):
    data['web']['ip'] = "0.0.0.0"
else:
    data['web']['ip'] = data['web']['ip'].format(ip=socket.gethostbyname(socket.gethostname()))
# Startup banner: credits, active log level, debug flag, and version info.
if True:
    send(l('bot'),f'{l("lobbybot")}: gomashio\n{l("credit")}\n{l("library")}: Terbau',cyan)
    text = ""
    if data['loglevel'] == 'normal':
        text += f'\n{l("loglevel")}: {l("normal")}\n'
    elif data['loglevel'] == 'info':
        text += f'\n{l("loglevel")}: {l("info")}\n'
    elif data['loglevel'] == 'debug':
        text += f'\n{l("loglevel")}: {l("debug")}\n'
    if data.get('debug',False) is True:
        text += f'\n{l("debug")}: {l("on")}\n'
    else:
        text += f'\n{l("debug")}: {l("off")}\n'
    text += f'\nPython {platform.python_version()}\n'
    text += f'fortnitepy {fortnitepy.__version__}\n'
    text += f'discord.py {discord.__version__}\n'
    text += f'Sanic {sanic.__version__}\n'
    send(l('bot'),text,green)
    if data.get('debug',False) is True:
        send(l('bot'),f'[{now()}] {l("debug_is_on")}',red)
    send(l('bot'),l("booting"))
# Discord client; isready gates features until on_ready has run.
dclient = discord.Client()
dclient.isready = False
if True: #discord
    @dclient.event
    async def on_ready() -> None:
        # Discord login handler: announce login, start the presence loop,
        # resolve the configured owner, then resolve the Discord black/white
        # lists into their module-level list objects.
        loop = asyncio.get_event_loop()
        dclient_user = name(dclient.user)
        send(dclient_user,f"{l('login')}: {dclient_user}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
        dclient.isready = True
        loop.create_task(status_loop())
        # Owner lookup: try the cache first, fall back to an API fetch.
        owner = dclient.get_user(int(data['discord']['owner']))
        if not owner:
            try:
                owner = await dclient.fetch_user(int(data['discord']['owner']))
            except discord.NotFound:
                if data['loglevel'] == "debug":
                    send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
            except discord.HTTPException:
                if data['loglevel'] == 'debug':
                    send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                send(dclient_user,l('error_while_requesting_userinfo'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
        if not owner:
            send(dclient_user,l('discord_owner_notfound'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
        else:
            dclient.owner = owner
            send(dclient_user,f"{l('owner')}: {name(dclient.owner)}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
        # Maps global list-variable name -> config key under data['discord'].
        lists = {
            "blacklist_": "blacklist",
            "whitelist_": "whitelist"
        }
        # NOTE: `_` closes over the loop variables list_/data_ below — it is
        # only safe because it is awaited inside each loop iteration.
        async def _(listuser: str) -> None:
            listuser = int(listuser)
            user = dclient.get_user(listuser)
            if not user:
                try:
                    user = await dclient.fetch_user(listuser)
                except discord.NotFound:
                    if data['loglevel'] == "debug":
                        send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
                    send(dclient_user,l(f'discord_{data_}_user_notfound', listuser),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
                    return
            globals()[list_].append(user.id)
        for list_,data_ in lists.items():
            await asyncio.gather(*[_(listuser) for listuser in data['discord'][data_]])
            if data['loglevel'] == "debug":
                # NOTE(review): data_ already ends in "list", so this prints
                # e.g. "discord blacklistlist" — confirm whether intended.
                send(dclient_user,f"discord {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
    @dclient.event
    async def on_message(message: discord.Message) -> None:
        # Delegate every incoming Discord message to the shared command processor.
        await process_command(message)
async def status_loop() -> None:
while True:
try:
var = globals()
activity = discord.Game(name=data['discord']['status'].format(**var))
await dclient.change_presence(activity=activity)
except Exception:
send(dclient.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await asyncio.sleep(30)
# Option lists for <select> widgets in the web editors. Each entry pairs the
# stored value with its localized display text.
select_bool = select(
    [
        {"value": "True","display_value": l('bool_true')},
        {"value": "False","display_value": l('bool_false')}
    ]
)
select_bool_none = select(
    [
        {"value": "True","display_value": l('bool_true')},
        {"value": "False","display_value": l('bool_false')},
        {"value": "None","display_value": l('bool_none')}
    ]
)
select_platform = select(
    [
        {"value": "WIN","display_value": "Windows"},
        {"value": "MAC","display_value": "Mac"},
        {"value": "PSN","display_value": "PlayStation"},
        {"value": "XBL","display_value": "Xbox"},
        {"value": "SWT","display_value": "Switch"},
        {"value": "IOS","display_value": "IOS"},
        {"value": "AND","display_value": "Android"}
    ]
)
select_privacy = select(
    [
        {"value": "public","display_value": l('public')},
        {"value": "friends_allow_friends_of_friends","display_value": l('friends_allow_friends_of_friends')},
        {"value": "friends","display_value": l('friends')},
        {"value": "private_allow_friends_of_friends","display_value": l('private_allow_friends_of_friends')},
        {"value": "private","display_value": l('private')}
    ]
)
select_matchmethod = select(
    [
        {"value": i,"display_value": i} for i in ["full","contains","starts","ends"]
    ]
)
select_loglevel = select(
    [
        {"value": "normal","display_value": l('normal')},
        {"value": "info","display_value": l('info')},
        {"value": "debug","display_value": l('debug')}
    ]
)
# Bot UI languages: every lang/*.json file except *_old.json backups.
select_lang = select(
    [
        {"value": re.sub(r"lang(\\|/)","",i).replace(".json",""),"display_value": re.sub(r"lang(\\|/)","",i).replace(".json","")} for i in glob("lang/*.json") if "_old.json" not in i
    ]
)
# Languages accepted for the cosmetics search API.
select_ben_lang = select(
    [
        {"value": i,"display_value": i} for i in ["ar","de","en","es","es-419","fr","it","ja","ko","pl","pt-BR","ru","tr","zh-CN","zh-Hant"]
    ]
)
# Replace string tag names (as loaded from JSON) with their runtime objects.
# Dispatch dicts replace the former if/elif chains; unknown tags and
# non-string entries (e.g. the bare types str/int/dict kept in the tag lists)
# are left untouched, matching the previous behavior exactly.
_CONFIG_TAG_OBJECTS = {
    "can_be_multiple": CanBeMultiple,
    "select_bool": select_bool,
    "select_bool_none": select_bool_none,
    "select_platform": select_platform,
    "select_privacy": select_privacy,
    "select_loglevel": select_loglevel,
    "select_lang": select_lang,
    "select_ben_lang": select_ben_lang,
    "select_matchmethod": select_matchmethod,
    "red": Red,
    "fix_required": FixRequired
}
# The commands editor only ever used this subset of tags.
_COMMANDS_TAG_OBJECTS = {
    "can_be_multiple": CanBeMultiple,
    "red": Red,
    "fix_required": FixRequired
}
for key,value in config_tags.items():
    for count,tag in enumerate(value):
        if isinstance(tag, str) and tag in _CONFIG_TAG_OBJECTS:
            config_tags[key][count] = _CONFIG_TAG_OBJECTS[tag]
for key,value in commands_tags.items():
    for count,tag in enumerate(value):
        if isinstance(tag, str) and tag in _COMMANDS_TAG_OBJECTS:
            commands_tags[key][count] = _COMMANDS_TAG_OBJECTS[tag]
if True: #Web
    @app.route("/favicon.ico", methods=["GET"])
    async def favicon(request: Request):
        # Serve the bundled icon from the static images directory.
        return sanic.response.redirect("/images/icon.png")
    # Placeholder landing page shown when the bot is in the "-1" state
    # (presumably the unconfigured Glitch remix template — TODO confirm):
    # links to the README and support Discord instead of the real UI.
    if os.environ.get("FORTNITE_LOBBYBOT_STATUS") == "-1":
        @app.route("/", methods=["GET"])
        async def main(request: Request):
            return sanic.response.html(
                "<h2>Fortnite-LobbyBot<h2>"
                "<p>初めに<a href='https://github.com/gomashio1596/Fortnite-LobbyBot/blob/master/README.md' target='_blank'>README</a>をお読みください</p>"
                "<p>First, please read <a href='https://github.com/gomashio1596/Fortnite-LobbyBot/blob/master/README_EN.md' target='_blank'>README<a/></p>"
                "<p>質問などは私(Twitter @gomashio1596 Discord gomashio#4335)か<a href='https://discord.gg/NEnka5N' target='_blank'>Discordサーバー</a>まで</p>"
                "<p>For questions, Contact to me(Twitter @gomashio1596 Discord gomashio#4335) or ask in <a href='https://discord.gg/NEnka5N' target='_blank'>Discord server</a></p>"
                "<p><a href='https://glitch.com/edit/#!/remix/fortnite-lobbybot' target='_blank'>ここをクリック</a>してRemix</p>"
                "<p><a href='https://glitch.com/edit/#!/remix/fortnite-lobbybot' target='_blank'>Click here</a> to Remix</p>"
                "<a href='https://discord.gg/NEnka5N' target='_blank'><img src='https://discordapp.com/api/guilds/718709023427526697/widget.png?style=banner1'></img></a>"
            )
    # First-run mode (status == 0): the root page is the config editor itself,
    # without login, so the user can complete the initial setup.
    elif data["status"] == 0:
        @app.route("/", methods=["GET", "POST"])
        async def main(request: Request):
            # GET renders the editor; POST validates, saves and restarts.
            flash_messages = []
            flash_messages_red = []
            if request.method == "GET":
                data = load_json("config.json")
                return render_template(
                    "config_editor.html",
                    l=l,
                    data=data,
                    config_tags=config_tags,
                    len=len,
                    type=type,
                    can_be_multiple=CanBeMultiple,
                    select=select,
                    str=str,
                    int=int,
                    bool=bool,
                    list=list,
                    red=Red,
                    fix_required=FixRequired,
                    flash_messages=flash_messages,
                    flash_messages_red=flash_messages_red
                )
            else:
                flag = False
                raw = request.form
                data = load_json("config.json")
                corrected = data
                # Each config_tags key looks like "['a']" or "['a']['b']";
                # strip the quoting to get the nested dict path.
                for key_,tags in config_tags.items():
                    keys = key_.replace("'","").replace("[","").split("]")
                    key = keys[0]
                    nest = len(keys) - 1
                    if nest == 1:
                        if dict in tags:
                            if not corrected.get(key):
                                corrected[key] = {}
                        else:
                            value = raw.get(f"['{key}']")
                            if FixRequired in tags and value == corrected.get(key):
                                flash_messages_red.append(l('this_field_fix_required', key))
                                flag = True
                            if CanBeMultiple in tags:
                                if str in tags:
                                    corrected[key] = ",".join([i for i in re.split(r'\n|\r',value) if i]) if value else ""
                                elif list in tags:
                                    corrected[key] = re.split(r'\r\n|\n',value) if value else []
                            elif str in tags:
                                corrected[key] = value.replace(r"\\n",r"\n").replace(r"\n","\n") if value else ""
                            elif int in tags:
                                corrected[key] = int(value) if value else 0
                            elif bool_ in tags:
                                corrected[key] = bool_.create(value)
                            elif bool_none in tags:
                                corrected[key] = bool_none.create(value)
                    elif nest == 2:
                        key2 = keys[1]
                        if dict in tags:
                            # NOTE(review): if `key` is missing entirely,
                            # corrected.get(key) is None and .get(key2) raises
                            # AttributeError — looks like a latent bug.
                            if not corrected.get(key):
                                if not corrected.get(key).get(key2):
                                    corrected[key][key2] = {}
                        else:
                            value2 = raw.get(f"['{key}']['{key2}']")
                            if FixRequired in tags and value2 == corrected.get(key,{}).get(key2):
                                flash_messages_red.append(l('this_field_fix_required', f"{key}: {key2}"))
                                flag = True
                            if CanBeMultiple in tags:
                                if str in tags:
                                    corrected[key][key2] = ",".join([i for i in re.split(r'\n|\r',value2) if i]) if value2 else ""
                                elif list in tags:
                                    corrected[key][key2] = re.split(r'\r\n|\n',value2) if value2 else []
                            elif str in tags:
                                corrected[key][key2] = value2.replace(r"\\n",r"\n").replace(r"\n","\n") if value2 else ""
                            elif int in tags:
                                corrected[key][key2] = int(value2) if value2 else 0
                            elif bool_ in tags:
                                corrected[key][key2] = bool_.create(value2)
                            elif bool_none in tags:
                                corrected[key][key2] = bool_none.create(value2)
                # Any unfixed "fix required" field re-renders the editor.
                if flag:
                    return render_template(
                        "config_editor.html",
                        l=l,
                        data=data,
                        config_tags=config_tags,
                        len=len,
                        type=type,
                        can_be_multiple=CanBeMultiple,
                        select=select,
                        str=str,
                        int=int,
                        bool=bool,
                        list=list,
                        red=Red,
                        fix_required=FixRequired,
                        flash_messages=flash_messages,
                        flash_messages_red=flash_messages_red
                    )
                else:
                    # Mark setup complete, persist, and restart the bot.
                    corrected["status"] = 1
                    with open('config.json', 'w', encoding='utf-8') as f:
                        json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
                    Thread(target=restart,args=(1,)).start()
                    return sanic.response.redirect("/")
    # Normal mode: root page is the dashboard; POST restarts the bot, but
    # only when the requester is authenticated (redirect happens either way).
    else:
        @app.route("/", methods=["GET", "POST"])
        async def main(request: Request):
            if request.method == "GET":
                return render_template(
                    "main.html",
                    l=l,
                    authenticated=auth.authenticated(request),
                    data=data
                )
            elif request.method == "POST":
                if auth.authenticated(request):
                    Thread(target=restart,args=(1,)).start()
                return sanic.response.redirect("/")
    @app.route("/login", methods=["GET", "POST"])
    async def login(request: Request):
        # Password-only login; already-authenticated users are sent home.
        if auth.authenticated(request):
            return sanic.response.redirect("/")
        else:
            flash_messages = []
            if request.method == "GET":
                return render_template("login.html", l=l, flash_messages=flash_messages)
            elif request.method == "POST":
                # Compare the submitted password against the configured one.
                if request.form.get("password","") == data["web"]["password"]:
                    r = sanic.response.redirect("/")
                    auth.login_user(request, r)
                    return r
                else:
                    flash_messages.append(l('invalid_password'))
                    return render_template("login.html", l=l, flash_messages=flash_messages)
    @app.route("/text")
    @auth.login_required
    async def web_text_(request: Request):
        # Expose the accumulated log/text buffer (web_text) as JSON.
        return sanic.response.json(
            {
                "text": web_text
            }
        )
    @app.route("/logout")
    @auth.login_required
    async def logout(request: Request):
        # Clear the session cookie on the redirect response, then send it.
        r = sanic.response.redirect("/")
        auth.logout_user(request, r)
        return r
@app.route("/config_editor", methods=["GET", "POST"])
@auth.login_required
async def config_editor(request: Request):
flash_messages = []
flash_messages_red = []
if request.method == "GET":
data = load_json("config.json")
return render_template(
"config_editor.html",
l=l,
data=data,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
flag = False
raw = request.form
data = load_json("config.json")
corrected = data
for key_,tags in config_tags.items():
keys = key_.replace("'","").replace("[","").split("]")
key = keys[0]
nest = len(keys) - 1
if nest == 1:
if dict in tags:
if not corrected.get(key):
corrected[key] = {}
else:
value = raw.get(f"['{key}']")
if FixRequired in tags and value == corrected.get(key):
flash_messages_red.append(l('this_field_fix_required', key))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key] = ",".join([i for i in re.split(r'\n|\r',value) if i]) if value else ""
elif list in tags:
corrected[key] = re.split(r'\r\n|\n',value) if value else []
elif str in tags:
corrected[key] = value.replace(r"\\n",r"\n").replace(r"\n","\n") if value else ""
elif int in tags:
corrected[key] = int(value) if value else 0
elif bool_ in tags:
corrected[key] = bool_.create(value)
elif bool_none in tags:
corrected[key] = bool_none.create(value)
elif nest == 2:
key2 = keys[1]
if dict in tags:
if not corrected.get(key):
if not corrected.get(key).get(key2):
corrected[key][key2] = {}
else:
value2 = raw.get(f"['{key}']['{key2}']")
if FixRequired in tags and value2 == corrected.get(key,{}).get(key2):
flash_messages_red.append(l('this_field_fix_required', f"{key}: {key2}"))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key][key2] = ",".join([i for i in re.split(r'\n|\r',value2) if i]) if value2 else ""
elif list in tags:
corrected[key][key2] = re.split(r'\r\n|\n',value2) if value2 else []
elif str in tags:
corrected[key][key2] = value2.replace(r"\\n",r"\n").replace(r"\n","\n") if value2 else ""
elif int in tags:
corrected[key][key2] = int(value2) if value2 else 0
elif bool_ in tags:
corrected[key][key2] = bool_.create(value2)
elif bool_none in tags:
corrected[key][key2] = bool_none.create(value2)
if flag:
return render_template(
"config_editor.html",
l=l,
data=corrected,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
corrected["status"] = 1
with open('config.json', 'w', encoding='utf-8') as f:
json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
if raw.get("reload"):
Thread(target=restart, args=(1,)).start()
return sanic.response.redirect("/")
else:
flash_messages.append(l('web_saved'))
return render_template(
"config_editor.html",
l=l,
data=corrected,
config_tags=config_tags,
len=len,
join=str.join,
split=str.split,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
    @app.route("/commands_editor", methods=["GET", "POST"])
    @auth.login_required
    async def commands_editor(request: Request):
        # Web editor for commands.json; same form protocol as the config
        # editor but only flat (nest == 1) string fields are handled.
        flash_messages = []
        flash_messages_red = []
        if request.method == "GET":
            data = load_json("commands.json")
            return render_template(
                "commands_editor.html",
                l=l,
                data=data,
                commands_tags=commands_tags,
                len=len,
                join=str.join,
                split=str.split,
                type=type,
                can_be_multiple=CanBeMultiple,
                select=select,
                str=str,
                int=int,
                bool=bool,
                list=list,
                red=Red,
                fix_required=FixRequired,
                flash_messages=flash_messages,
                flash_messages_red=flash_messages_red
            )
        elif request.method == "POST":
            flag = False
            raw = request.form
            data = load_json("commands.json")
            corrected = data
            for key_,tags in commands_tags.items():
                keys = key_.replace("'","").replace("[","").split("]")
                key = keys[0]
                nest = len(keys) - 1
                if nest == 1:
                    if dict in tags:
                        # NOTE(review): direct indexing here (vs .get in the
                        # config editor) raises KeyError if `key` is absent —
                        # confirm keys are always present in commands.json.
                        if not corrected[key]:
                            corrected[key] = {}
                    else:
                        value = raw.get(f"['{key}']")
                        if FixRequired in tags and value == corrected.get(key):
                            flash_messages_red.append(l('this_field_fix_required', key))
                            flag = True
                        if CanBeMultiple in tags:
                            if str in tags:
                                corrected[key] = ",".join([i for i in re.split(r'\n|\r',value) if i]) if value else ""
            # Any unfixed "fix required" field re-renders the editor.
            if flag:
                return render_template(
                    "commands_editor.html",
                    l=l,
                    data=corrected,
                    commands_tags=commands_tags,
                    len=len,
                    join=str.join,
                    split=str.split,
                    type=type,
                    can_be_multiple=CanBeMultiple,
                    select=select,
                    str=str,
                    int=int,
                    bool=bool,
                    list=list,
                    red=Red,
                    fix_required=FixRequired,
                    flash_messages=flash_messages,
                    flash_messages_red=flash_messages_red
                )
            else:
                with open('commands.json', 'w', encoding='utf-8') as f:
                    json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
                # "reload" checkbox: restart the bot, otherwise just confirm.
                if raw.get("reload"):
                    Thread(target=restart, args=(1,)).start()
                    return sanic.response.redirect("/")
                else:
                    flash_messages.append(l('web_saved'))
                    return render_template(
                        "commands_editor.html",
                        l=l,
                        data=corrected,
                        commands_tags=commands_tags,
                        len=len,
                        type=type,
                        can_be_multiple=CanBeMultiple,
                        select=select,
                        str=str,
                        int=int,
                        bool=bool,
                        list=list,
                        red=Red,
                        fix_required=FixRequired,
                        flash_messages=flash_messages,
                        flash_messages_red=flash_messages_red
                    )
    @app.route("/replies_editor", methods=["GET", "POST"])
    @auth.login_required
    async def replies_editor(request: Request):
        # Web editor for replies.json: a flat trigger -> reply mapping built
        # from numbered form fields trigger0/content0, trigger1/content1, ...
        flash_messages = []
        flash_messages_red = []
        if request.method == "GET":
            data = load_json("replies.json")
            return render_template(
                "replies_editor.html",
                l=l,
                data=data,
                flash_messages=flash_messages,
                flash_messages_red=flash_messages_red,
                len=len,
                enumerate=enumerate,
                str=str
            )
        elif request.method == "POST":
            raw = request.form
            corrected = {}
            # raw["number"] is a list of form values; [0] is the row count.
            for num in range(0,int(raw["number"][0])):
                trigger = raw.get(f"trigger{str(num)}")
                if not trigger:
                    flash_messages_red.append(l('cannot_be_empty'))
                    break
                content = raw.get(f"content{str(num)}")
                if not content:
                    flash_messages_red.append(l('cannot_be_empty'))
                    break
                corrected[trigger] = content
            # NOTE(review): the file is written even when the loop broke on an
            # empty field, silently dropping the remaining rows — confirm
            # whether a validation failure should skip the save.
            with open('replies.json', 'w', encoding='utf-8') as f:
                json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
            if raw.get("reload"):
                Thread(target=restart, args=(1,)).start()
                return sanic.response.redirect("/")
            else:
                flash_messages.append(l('web_saved'))
                return render_template(
                    "replies_editor.html",
                    l=l,
                    data=corrected,
                    flash_messages=flash_messages,
                    flash_messages_red=flash_messages_red,
                    len=len,
                    enumerate=enumerate,
                    str=str
                )
    @app.route("/party_viewer", methods=["GET"])
    @auth.login_required
    async def party_viewer(request: Request):
        # Overview page listing all running Fortnite clients.
        return render_template(
            "party_viewer.html",
            l=l,
            clients=clients,
            enumerate=enumerate
        )
    @app.route("/clients<num>", methods=["GET", "POST"])
    @auth.login_required
    async def clients_viewer(request: Request, num: str):
        # Per-client page; POST runs a bot command against that client.
        num = int(num)
        # Slice trick avoids IndexError for out-of-range num: an empty slice
        # (len != 1) yields None and a 404 below.
        client = clients[num] if len(clients[num:num+1]) == 1 else None
        if not client:
            sanic.exceptions.abort(404)
        flash_messages = []
        if request.method == "GET":
            return render_template(
                "clients_viewer.html",
                l=l,
                client=client,
                none=None,
                len=len,
                flash_messages=flash_messages
            )
        else:
            if request.form.get("command"):
                # Sanic form values can be lists; take the first entry.
                content = request.form["command"][0] if isinstance(request.form["command"],list) else request.form["command"]
                # Wrap the command so process_command treats it like a chat
                # message attributed to this web session's cookie.
                message = WebMessage(content, request.cookies.get(auth.cookie_key, 'NoID'), client)
                await process_command(message)
                result = message.result
                if result:
                    # Flatten multi-line command output into flash messages.
                    for mes in message.result:
                        for m in mes.split('\n'):
                            flash_messages.append(m)
                return render_template(
                    "clients_viewer.html",
                    l=l,
                    client=client,
                    none=None,
                    len=len,
                    flash_messages=flash_messages
                )
            else:
                return sanic.response.redirect(f"/clients{num}")
@app.route("/clients_info/<num>", methods=["GET"])
@auth.login_required
async def clients_info(request: Request, num: str):
num = int(num)
client = clients[num] if len(clients[num:num+1]) == 1 else None
if not client:
return sanic.response.json(
{
"error": "account_not_exists"
}
)
elif not client.isready:
return sanic.response.json(
{
"error": "account_not_loaded"
}
)
elif not client.party or not client.party.me:
return sanic.response.json(
{
"error": "party_moving"
}
)
else:
return sanic.response.json(
{
"display_name": client.user.display_name,
"id": client.user.id,
"leader": client.party.me.leader,
"banner": search_banner(client.party.me.banner[0]),
"level": client.party.me.banner[2],
"outfit": member_asset(client.party.me, "outfit"),
"outfit_variants": client.party.me.outfit_variants,
"backpack": member_asset(client.party.me, "backpack"),
"backpack_variants": client.party.me.backpack_variants,
"pickaxe": member_asset(client.party.me, "pickaxe"),
"pickaxe_variants": client.party.me.pickaxe_variants,
"contrail": member_asset(client.party.me, "contrail"),
"emote": member_asset(client.party.me, "emote"),
"party_id": client.party.id,
"members": [
{
"display_name": i.display_name,
"id": i.id,
"leader": i.leader,
"banner": search_banner(i.banner[0]),
"level": i.banner[2],
"outfit": member_asset(i, "outfit"),
"outfit_variants": i.outfit_variants,
"backpack": member_asset(i, "backpack"),
"backpack_variants": i.backpack_variants,
"pickaxe": member_asset(i, "pickaxe"),
"pickaxe_variants": i.pickaxe_variants,
"contrail": member_asset(i, "contrail"),
"emote": member_asset(i, "emote")
} for i in client.party.members.copy().values()
]
}
)
@app.route("/boot_switch", methods=["GET", "POST"])
@auth.login_required
async def boot_switch(request: Request):
if request.method == "GET":
return render_template(
"boot_switch.html",
l=l,
len=len
)
elif request.method == "POST":
raw = request.form
for i in raw.keys():
if "on" in i or "off" in i:
break
on_or_off = i
num = int(re.sub(r"on|off","", on_or_off))
on_or_off = i.replace(str(num),"")
loop = asyncio.get_event_loop()
if on_or_off == "on":
clients[num].booting = True
loop.create_task(clients[num].start())
elif on_or_off == "off":
loop.create_task(clients[num].close())
return sanic.response.redirect("/boot_switch")
@app.route("/boot_info", methods=["GET"])
@auth.login_required
async def boot_info(request: Request):
data = {}
for client in clients:
if not client.booting and not client.isready:
data[client.email] = {
"info": "info_closed",
"booting": client.booting,
"isready": client.isready
}
elif client.booting:
data[client.email] = {
"info": "info_booting",
"booting": client.booting,
"isready": client.isready
}
elif client.isready:
data[client.email] = {
"info": "info_ready",
"booting": client.booting,
"isready": client.isready
}
return sanic.response.json(data)
# Custom 404 page.
@app.exception(sanic.exceptions.NotFound)
async def not_found(request: Request, exception: Exception):
return render_template("not_found.html", l=l)
# Unauthenticated visitors are bounced to the login/root page.
@auth.no_auth_handler
async def unauthorized(request: Request, *args, **kwargs):
return sanic.response.redirect("/")
# --- Startup ---------------------------------------------------------------
# Schedules the web app (when enabled in config, or in web-only mode where
# status == 0), kicks off background data fetch threads, builds one
# fortnitepy Client per configured account, then runs the event loop forever.
loop = asyncio.get_event_loop()
if data.get('web',{}).get('enabled',True) is True or data.get('status',1) == 0:
loop.create_task(run_app())
# Background console printer runs for the whole process lifetime.
Thread(target=dprint,args=(),daemon=True).start()
if data.get("status",1) != 0:
# Always include English as the fallback item/banner search language.
langs = [data["search-lang"],"en"] if data["search-lang"] != "en" else ["en"]
Thread(target=store_item_data,args=(langs,)).start()
Thread(target=store_banner_data).start()
for email in data["fortnite"]["email"].split(','):
email = email.strip()
try:
# Reuse stored device-auth credentials; generate and store them on
# first run for this account.
device_auth_details = get_device_auth_details().get(email.lower(), {})
if not device_auth_details:
device_auth_details = loop.run_until_complete(generate_device_auth_and_store(email))
client = Client(
auth=fortnitepy.DeviceAuth(
**device_auth_details
),
default_party_config=fortnitepy.DefaultPartyConfig(
privacy=data['fortnite']['privacy']
),
default_party_member_config=fortnitepy.DefaultPartyMemberConfig(
# Cosmetic IDs in config are lowercase; the API expects the
# canonical "CID_"/"BID_"/"Pickaxe_ID_" prefixes.
meta=[
partial(ClientPartyMember.set_outfit, data['fortnite']['cid'].replace('cid','CID',1)),
partial(ClientPartyMember.set_backpack, data['fortnite']['bid'].replace('bid','BID',1)),
partial(ClientPartyMember.set_pickaxe, data['fortnite']['pickaxe_id'].replace('pickaxe_id','Pickaxe_ID',1)),
partial(ClientPartyMember.set_battlepass_info, has_purchased=True, level=data['fortnite']['tier'], self_boost_xp=data['fortnite']['xpboost'], friend_boost_xp=data['fortnite']['friendxpboost']),
partial(ClientPartyMember.set_banner, icon=data['fortnite']['banner'], color=data['fortnite']['banner_color'], season_level=data['fortnite']['level'])
]
),
platform=fortnitepy.Platform(data['fortnite']['platform'].upper())
)
except ValueError:
# Bad config value (e.g. unknown platform): log and skip this account.
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l("bot"),l('error_while_setting_client'),red,add_d=lambda x:f'>>> {x}')
continue
clients.append(client)
if data.get('status',1) != 0 and bot_ready:
loop.create_task(run_bot())
try:
loop.run_forever()
except KeyboardInterrupt:
sys.exit(1)
|
tracks_manipulation_lib.py | import sys, os, fnmatch
import pandas as pd
import numpy as np
import sys
import random
from trackml.dataset import load_event
from trackml.dataset import load_dataset
from trackml.randomize import shuffle_hits
from trackml.score import score_event
import multiprocessing
from multiprocessing import Process, Value, Lock
import glob, os
#sys.path.append('/home/silvio/github/track-ml-1/utils')
#from tracktop import *
#obtain amount of columns
def amount_of_columns(cell):
    """Return the 1-based position of the first zero element of *cell*.

    Returns 0 when *cell* contains no zero (including when it is empty).
    The callers use this to find how many leading columns of a padded row
    are populated: the padding after the real data is all zeros, so the
    first zero marks the end of the meaningful columns.
    """
    # The original implementation scanned the whole sequence with a
    # "first zero seen" flag; returning at the first zero is equivalent
    # and stops early.
    for position, value in enumerate(cell, start=1):
        if value == 0:
            return position
    return 0
def create_directory_for_results(temporary_directory):
    """Ensure *temporary_directory* exists and contains no files.

    An existing directory is emptied of its top-level entries; a missing
    one is created. Used to give each worker run a clean scratch area.
    """
    if not os.path.isdir(temporary_directory):
        os.mkdir(temporary_directory)
        return
    # Directory already there: delete every entry inside it.
    for stale in glob.glob(temporary_directory + "//*"):
        os.remove(stale)
def join_files(temporary_directory, output_file_real):
    """Concatenate every file under *temporary_directory* (recursively)
    into *output_file_real*, overwriting any existing output.

    Used to merge the per-worker partial result files into one CSV.
    """
    partial_paths = [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(temporary_directory)
        for name in names
    ]
    with open(output_file_real, 'w') as outfile:
        for path in partial_paths:
            with open(path) as infile:
                outfile.write(infile.read())
# Worker: flatten particle information and all the hits of each track into a
# single fixed-width row, for the particle index range [be, e), and save the
# resulting matrix as a per-worker file named arr<pid> in temporary_directory.
# Row layout (assumed from the code below — confirm): 6 particle columns
# (vx,vy,vz,px,py,pz), then 6 columns per hit (tx,ty,tz, mean ch0, mean ch1,
# mean value), zero-padded on the right up to tot_columns-1 columns.
def create_tracks(tot_columns , hits, cells, particles, truth, be,e,pid,temporary_directory):
b = np.zeros((0))
for index, row in particles.iloc[be:e,:].iterrows():
# All truth hits belonging to this particle.
truth_0 = truth[truth.particle_id == row['particle_id']]
par=particles[['vx','vy','vz','px','py','pz']].loc[particles['particle_id'] == row['particle_id']]
particleRow = [par['vx'].values[0],par['vy'].values[0],par['vz'].values[0],par['px'].values[0],par['py'].values[0],par['pz'].values[0]]
psize=par.size
b = np.concatenate((b, particleRow))
#print(truth_0.size)
#print(truth_0.shape)
h = np.zeros((0))
#jj=0
for index, row in truth_0.iterrows():
# Mean detector-cell readings for this hit (a hit may span several cells).
ch=cells[['ch0']].loc[cells['hit_id'] == row['hit_id']].mean()
ch1=cells[['ch1']].loc[cells['hit_id'] == row['hit_id']].mean()
vl=cells[['value']].loc[cells['hit_id'] == row['hit_id']].mean()
hitRow = [row['tx'],row['ty'],row['tz'],ch[0], ch1[0], vl[0]]
h= np.concatenate((h, hitRow))
hsize=h.size
b=np.concatenate((b, h))
aux = np.zeros((0))
# Zero-pad the row: tot_columns-1 total minus the 6 particle columns
# minus the hit columns already emitted.
remaing_columns_to_zero=tot_columns-1-h.size-6
if (remaing_columns_to_zero > 0):
aux = np.zeros(remaing_columns_to_zero)
auxsize=aux.size
b=np.concatenate((b, aux))
#print("bb ", b)
#print("psize ", psize, "hsize ", hsize, "auxsize ", auxsize, "sum ", psize+hsize+auxsize)
# One fixed-width row per particle in [be, e).
rw=(e-be)
b = b.reshape(rw, (tot_columns-1))
np.savetxt(temporary_directory+"//arr"+str(pid), b, fmt="%s")
# Driver: load one trackml event, fan create_tracks() out over all CPU cores
# (minus 2), then merge the per-worker files into a single CSV of real tracks.
def createTracks(event_prefix , dir_event_prefix, diroutput):
#global Am_of_cores
#global Am_of_particles
#global total_of_loops
#global remaining_tracks
print(dir_event_prefix + " - " + event_prefix)
hits, cells, particles, truth = load_event(os.path.join(dir_event_prefix, event_prefix))
#X = np.zeros((0))
#121 columns -> 6 particles columns; 19 hits (6 columns); um result columns (fake or real) ==> 6x19 + 6 +1 =121
#tot_columns=121
# Row width incl. the label column; see create_tracks for the layout.
tot_columns=175
Am_of_particles = particles.shape[0]
# Leave two cores free for the OS / main process.
Am_of_cores = multiprocessing.cpu_count()-2
# Evenly sized chunk per worker; the last worker also takes the remainder.
total_of_loops = Am_of_particles // Am_of_cores
remaining_tracks = (Am_of_particles-(total_of_loops*Am_of_cores))
#output_file_all = "/data/output/TracksRealFake"+str(event_prefix)+".csv"
#output_file_real = "/data/output/"+str(dir_event_prefix)+"/TracksReal"+str(event_prefix)+".csv"
output_file_real = str(diroutput)+"/TracksReal"+str(event_prefix)+".csv"
#output_file_real_aux = "/data/output/TracksRealAUX"+str(event_prefix)+".csv"
#output_file_fake = "/data/output/TracksFake"+str(event_prefix)+".csv"
temporary_directory = "/tmp/res/"+str(event_prefix)+"/"
#output_file_all = "/data/output/TracksRealFake"+str(event_prefix)+".csv"
#output_file_real = "/data/output/TracksReal"+str(event_prefix)+".csv"
#output_file_real_aux = "/data/output/TracksRealAUX"+str(event_prefix)+".csv"
#output_file_fake = "/data/output/TracksFake"+str(event_prefix)+".csv"
#print("temporary_directory: ", temporary_directory)
#print("Amount of Particles: ", Am_of_particles)
#print("Amount of Processing cores: ", Am_of_cores)
#print("total of loops: ", total_of_loops)
#print("remaing tracks : ", remaining_tracks)
print("output_file_real : " , output_file_real)
step=1
pid=0
create_directory_for_results(temporary_directory)
jobs = []
# One process per core, plus one extra for the remainder chunk.
for i in range(Am_of_cores+1):
#for i in range(1):
b=i*total_of_loops
if (i == Am_of_cores):
e=b+remaining_tracks
else:
e=b+total_of_loops
#e=10
#b=1
p = multiprocessing.Process(target=create_tracks, args=(tot_columns, hits, cells, particles, truth,b,e,pid,temporary_directory))
#p = multiprocessing.Process(target=count_hits, args=(b,e,pid,temporary_directory))
#print ("multiprocessing: ", b,e)
pid=pid+1
jobs.append(p)
p.start()
# Wait for every worker before merging their partial files.
for proc in jobs:
proc.join()
del jobs[:]
join_files(temporary_directory, output_file_real)
# Round-trip through pandas to prepend an index column and header row.
tracks = pd.read_csv(output_file_real,header = None, sep = " ")
tracks.to_csv(output_file_real)
|
example_APImode2.py | #!/usr/bin/env python
import threading
import time

import XBee_API
import read_comm

# authorship info
__author__ = "Francesco Vallegra"
__copyright__ = "Copyright 2017, MIT-SUTD"
__license__ = "MIT"

# create XBee module object
# NOTE: no need to specify serial port (can be found automatically) BUT the baud rate of the serial MUST be 9600
x_bee = XBee_API.XBee_module(AP=2)
# NOTE: same thing could have been achieved by using: x_bee = XBee_API.XBee_module()
# NOTE: more options can be set when creating the object, like:
# - XBee Type: CE=[0: Router; 2: EndPoint]
# - Network ID: ID=hex number from 0x0000 to 0xffff [default 0x7fff]

# thread for the incoming XBee messages; daemon so it dies with the main thread.
comm_thread = threading.Thread(target=read_comm.read_comm, args=(x_bee,))
# Thread.setDaemon() is deprecated since Python 3.10 (removed in 3.13);
# assign the attribute instead.
comm_thread.daemon = True
comm_thread.start()

# keep the program alive till stopped by user
while True:
    time.sleep(1.)
|
test_transport.py | """Unit tests for the transport module."""
from datetime import datetime
import logging
try:
import queue
except ImportError:
import Queue as queue
import os
import select
import socket
import ssl
from struct import pack
import sys
import threading
import time
import pytest
from pydicom import dcmread
import pynetdicom
from pynetdicom import AE, evt, _config, debug_logger
from pynetdicom.association import Association
from pynetdicom.events import Event
from pynetdicom._globals import MODE_REQUESTOR, MODE_ACCEPTOR
from pynetdicom import transport
from pynetdicom.transport import (
AssociationSocket, AssociationServer, ThreadedAssociationServer
)
from pynetdicom.sop_class import VerificationSOPClass, RTImageStorage
from .hide_modules import hide_modules
# This is the directory that contains test data
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
CERT_DIR = os.path.join(TEST_ROOT, 'cert_files')
DCM_DIR = os.path.join(TEST_ROOT, 'dicom_files')
# SSL Testing: certificate/key pairs used by the TLS fixtures below.
SERVER_CERT, SERVER_KEY = (
os.path.join(CERT_DIR, 'server.crt'),
os.path.join(CERT_DIR, 'server.key')
)
CLIENT_CERT, CLIENT_KEY = (
os.path.join(CERT_DIR, 'client.crt'),
os.path.join(CERT_DIR, 'client.key')
)
# Loaded once at import; reused by the C-STORE transfer test.
DATASET = dcmread(os.path.join(DCM_DIR, 'RTImageStorage.dcm'))
# Uncomment for verbose pynetdicom logging while debugging.
#debug_logger()
class TestAssociationSocket(object):
"""Tests for the transport.AssociationSocket class."""
def setup(self):
# Fresh requestor-mode Association for each test.
ae = AE()
self.assoc = Association(ae, MODE_REQUESTOR)
def get_listen_socket(self):
# Helper: plain TCP listener on port 11112 with a receive timeout.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_RCVTIMEO,
# struct timeval packed as two longs: 1 second, 0 microseconds
# (layout is platform-dependent).
pack('ll', 1, 0)
)
sock.bind(('', 11112))
sock.listen(5)
return sock
def test_init_new(self):
"""Test creating a new AssociationSocket instance."""
sock = AssociationSocket(self.assoc)
assert sock.tls_args is None
assert sock.select_timeout == 0.5
assert sock._assoc == self.assoc
assert isinstance(sock.socket, socket.socket)
assert sock._is_connected is False
with pytest.raises(queue.Empty):
sock.event_queue.get(block=False)
def test_init_address(self):
"""Test creating a new bound AssociationSocket instance."""
sock = AssociationSocket(self.assoc, address=('', 11112))
assert sock.tls_args is None
assert sock.select_timeout == 0.5
assert sock._assoc == self.assoc
assert isinstance(sock.socket, socket.socket)
assert sock.socket.getsockname()[0] == '0.0.0.0'
assert sock.socket.getsockname()[1] == 11112
assert sock._is_connected is False
with pytest.raises(queue.Empty):
sock.event_queue.get(block=False)
def test_init_existing(self):
"""Test creating a new AssociationSocket around existing socket."""
sock = AssociationSocket(self.assoc, client_socket='abc')
assert sock.tls_args is None
assert sock.select_timeout == 0.5
assert sock._assoc == self.assoc
assert sock.socket == 'abc'
assert sock._is_connected is True
# "Evt5" is the DUL state-machine's transport-connection-open event.
assert sock.event_queue.get(block=False) == "Evt5"
def test_init_raises(self, caplog):
"""Test exception is raised if init with client_socket and address."""
msg = (
r"AssociationSocket instantiated with both a 'client_socket' "
r"and bind 'address'. The original socket will not be rebound"
)
with caplog.at_level(logging.WARNING, logger='pynetdicom'):
sock = AssociationSocket(self.assoc,
client_socket='abc',
address=('', 11112))
assert msg in caplog.text
def test_close_connect(self):
"""Test closing and connecting."""
sock = AssociationSocket(self.assoc)
sock._is_connected = True
assert sock.socket is not None
sock.close()
assert sock.socket is None
# Tries to connect, sets to None if fails
# ("Evt17" is the transport-connection-closed event).
sock.connect(('', 11112))
assert sock.event_queue.get() == 'Evt17'
assert sock.socket is None
def test_ready_error(self):
"""Test AssociationSocket.ready."""
sock = AssociationSocket(self.assoc)
assert sock.ready is False
sock._is_connected = True
assert sock.ready is True
sock.socket.close()
assert sock.ready is False
assert sock.event_queue.get() == 'Evt17'
def test_print(self):
"""Test str(AssociationSocket)."""
sock = AssociationSocket(self.assoc)
assert sock.__str__() == sock.socket.__str__()
def test_close_socket_none(self):
"""Test trying to close a closed socket."""
def handle_close(event):
# Simulate a vanished socket during the close handler.
event.assoc.dul.socket.socket = None
hh = [(evt.EVT_CONN_CLOSE, handle_close)]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11113), block=False, evt_handlers=hh)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('', 11113)
assert assoc.is_established
assoc.release()
assert assoc.is_released
scp.shutdown()
def test_get_local_addr(self):
"""Test get_local_addr()."""
# Normal use
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_requested_context(VerificationSOPClass)
# No server listening on 11113: association must fail.
assoc = ae.associate('', 11113)
assert not assoc.is_established
assert isinstance(assoc.requestor.address, str)
# Exceptional use: invalid port falls back to loopback.
assert not assoc.is_established
addr = assoc.dul.socket.get_local_addr(('', 111111))
assert '127.0.0.1' == addr
@pytest.fixture
def server_context(request):
"""Return a good server SSLContext."""
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# Mutual TLS: the server also verifies the client's certificate.
context.verify_mode = ssl.CERT_REQUIRED
context.load_cert_chain(certfile=SERVER_CERT, keyfile=SERVER_KEY)
context.load_verify_locations(cafile=CLIENT_CERT)
# TLS v1.3 is not currently supported :(
# The actual available attributes/protocols depend on OS, OpenSSL version
# and Python version, ugh
if hasattr(ssl, 'TLSVersion'):
# This is the current and future, but heavily depends on OpenSSL
# Python 3.7+, w/ OpenSSL 1.1.0g+
context.maximum_version = ssl.TLSVersion.TLSv1_2
else:
# Should work with older Python and OpenSSL versions
# Python 2.7, 3.5, 3.6
# (rebuilds the context from scratch, pinned to TLS 1.2)
context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_REQUIRED
context.load_cert_chain(certfile=SERVER_CERT, keyfile=SERVER_KEY)
context.load_verify_locations(cafile=CLIENT_CERT)
return context
@pytest.fixture
def client_context(request):
"""Return a good client SSLContext."""
# Trusts the test server's self-signed certificate as CA.
context = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH, cafile=SERVER_CERT)
context.verify_mode = ssl.CERT_REQUIRED
context.load_cert_chain(certfile=CLIENT_CERT, keyfile=CLIENT_KEY)
return context
class TestTLS(object):
"""Test using TLS to wrap the association."""
def setup(self):
self.ae = None
self.has_ssl = transport._HAS_SSL
def teardown(self):
if self.ae:
self.ae.shutdown()
# Ensure ssl module is available again
# (the @hide_modules tests below reload transport without ssl).
import importlib
try:
importlib.reload(pynetdicom.transport)
except AttributeError:
# Python 2
reload(pynetdicom.transport)
def test_tls_not_server_not_client(self):
"""Test associating with no TLS on either end."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
server = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('', 11112)
assert assoc.is_established
status = assoc.send_c_echo()
assert status.Status == 0x0000
assoc.release()
assert assoc.is_released
server.shutdown()
assert len(server.active_associations) == 0
def test_tls_not_server_yes_client(self, client_context):
"""Test wrapping the requestor socket with TLS (but not server)."""
self.ae = ae = AE()
# Short timeouts: the mismatched handshake is expected to fail fast.
ae.acse_timeout = 0.5
ae.dimse_timeout = 0.5
ae.network_timeout = 0.5
ae.add_supported_context('1.2.840.10008.1.1')
server = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('', 11112, tls_args=(client_context, None))
assert assoc.is_aborted
server.shutdown()
time.sleep(0.5)
assert len(server.active_associations) == 0
def test_tls_yes_server_not_client(self, server_context):
"""Test wrapping the acceptor socket with TLS (and not client)."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
server = ae.start_server(
('', 11112),
block=False,
ssl_context=server_context,
)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('', 11112)
assert assoc.is_aborted
server.shutdown()
assert len(server.active_associations) == 0
def test_tls_yes_server_yes_client(self, server_context, client_context):
"""Test associating with TLS on both ends."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context('1.2.840.10008.1.1')
server = ae.start_server(
('', 11112),
block=False,
ssl_context=server_context,
)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('', 11112, tls_args=(client_context, None))
assert assoc.is_established
assoc.release()
assert assoc.is_released
server.shutdown()
assert len(server.active_associations) == 0
def test_tls_transfer(self, server_context, client_context):
"""Test transferring data after associating with TLS."""
ds = []
def handle_store(event):
# Capture the received dataset so its contents can be checked.
ds.append(event.dataset)
return 0x0000
handlers = [(evt.EVT_C_STORE, handle_store)]
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_supported_context(RTImageStorage)
server = ae.start_server(
('', 11112),
block=False,
ssl_context=server_context,
evt_handlers=handlers
)
ae.add_requested_context('1.2.840.10008.1.1')
ae.add_requested_context(RTImageStorage)
assoc = ae.associate('', 11112, tls_args=(client_context, None))
assert assoc.is_established
status = assoc.send_c_store(DATASET)
assert status.Status == 0x0000
assoc.release()
assert assoc.is_released
server.shutdown()
# Full pixel data survived the encrypted transfer intact.
assert len(ds[0].PixelData) == 2097152
@hide_modules(['ssl'])
def test_no_ssl_scp(self):
"""Test exception raised if no SSL available to Python as SCP."""
# Reload pynetdicom package
import importlib
try:
importlib.reload(pynetdicom.transport)
except AttributeError:
# Python 2
reload(pynetdicom.transport)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context('1.2.840.10008.1.1')
msg = r"Your Python installation lacks support for SSL"
with pytest.raises(RuntimeError, match=msg):
ae.start_server(
('', 11112),
block=False,
ssl_context=['random', 'object'],
)
@hide_modules(['ssl'])
def test_no_ssl_scu(self):
"""Test exception raised if no SSL available to Python as SCU."""
# Reload pynetdicom package
import importlib
try:
importlib.reload(pynetdicom.transport)
except AttributeError:
# Python 2
reload(pynetdicom.transport)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_requested_context('1.2.840.10008.1.1')
msg = r"Your Python installation lacks support for SSL"
with pytest.raises(RuntimeError, match=msg):
ae.associate('', 11112, tls_args=(['random', 'object'], None))
# Tests for transport.AssociationServer: multiple simultaneous associations,
# event-handler registration rules, and shutdown edge cases.
class TestAssociationServer(object):
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
@pytest.mark.skip()
def test_multi_assoc_block(self):
"""Test that multiple requestors can associate when blocking."""
# Skipped: blocking start_server() never returns.
self.ae = ae = AE()
ae.maximum_associations = 10
ae.add_supported_context('1.2.840.10008.1.1')
ae.start_server(('', 11112))
def test_multi_assoc_non(self):
"""Test that multiple requestors can association when non-blocking."""
self.ae = ae = AE()
ae.maximum_associations = 10
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assocs = []
for ii in range(10):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assocs.append(assoc)
for assoc in assocs:
assoc.release()
scp.shutdown()
def test_init_handlers(self):
"""Test AssociationServer.__init__()."""
def handle(event):
pass
def handle_echo(event):
return 0x0000
def handle_echo_b(event):
return 0x0000
self.ae = ae = AE()
handlers = [
(evt.EVT_DATA_RECV, handle),
(evt.EVT_DATA_RECV, handle),
(evt.EVT_C_ECHO, handle_echo),
(evt.EVT_C_ECHO, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo),
(evt.EVT_DATA_SENT, handle),
]
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert evt.EVT_DATA_RECV in scp._handlers
assert evt.EVT_C_ECHO in scp._handlers
# Duplicates not added
assert len(scp._handlers[evt.EVT_DATA_RECV]) == 1
# Multiples allowed
assert len(scp._handlers[evt.EVT_DATA_SENT]) == 3
# Only a single handler allowed
# (intervention events keep the last-bound handler only).
assert scp._handlers[evt.EVT_C_ECHO] == (handle_echo_b, None)
def test_get_events(self):
"""Test AssociationServer.get_events()."""
def handle(event):
pass
def handle_echo(event):
return 0x0000
def handle_echo_b(event):
return 0x0000
self.ae = ae = AE()
handlers = [
(evt.EVT_DATA_RECV, handle),
(evt.EVT_DATA_RECV, handle),
(evt.EVT_C_ECHO, handle_echo),
(evt.EVT_C_ECHO, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo),
(evt.EVT_DATA_SENT, handle),
]
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
bound_events = scp.get_events()
assert evt.EVT_DATA_RECV in bound_events
assert evt.EVT_DATA_SENT in bound_events
assert evt.EVT_C_ECHO in bound_events
scp.shutdown()
def test_get_handlers(self):
"""Test AssociationServer.get_handlers()."""
_config.LOG_HANDLER_LEVEL = 'none'
def handle(event):
pass
def handle_echo(event):
return 0x0000
def handle_echo_b(event):
return 0x0000
self.ae = ae = AE()
handlers = [
(evt.EVT_DATA_RECV, handle),
(evt.EVT_DATA_RECV, handle),
(evt.EVT_C_ECHO, handle_echo),
(evt.EVT_C_ECHO, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo_b),
(evt.EVT_DATA_SENT, handle_echo),
(evt.EVT_DATA_SENT, handle),
]
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
assert (handle, None) in scp.get_handlers(evt.EVT_DATA_SENT)
assert (handle_echo, None) in scp.get_handlers(evt.EVT_DATA_SENT)
assert (handle_echo_b, None) in scp.get_handlers(evt.EVT_DATA_SENT)
assert scp.get_handlers(evt.EVT_C_ECHO) == (handle_echo_b, None)
assert scp.get_handlers(evt.EVT_PDU_SENT) == []
scp.shutdown()
def test_shutdown(self):
"""test tring to shutdown a socket that's already closed."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
server = ae.start_server(('', 11112), block=False)
server.socket.close()
server.shutdown()
def test_exception_in_handler(self):
"""Test exc raised by the handler doesn't shut down the server."""
# Minimal AE stand-in: associating against it makes the server's
# handler raise (b'a' is an invalid AE title).
class DummyAE(object):
network_timeout = 5
_servers = []
dummy = DummyAE()
server = ThreadedAssociationServer(dummy, ('', 11112), b'a', [])
dummy._servers.append(server)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
ae = AE()
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('', 11112)
# Server socket must still be open despite the handler exception.
assert server.socket.fileno() != -1
server.shutdown()
if sys.version_info[0] == 2:
with pytest.raises(socket.error):
server.socket.fileno()
else:
assert server.socket.fileno() == -1
class TestEventHandlingAcceptor(object):
"""Test the transport events and handling as acceptor."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_no_handlers(self):
"""Test with no transport event handlers bound."""
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
# The requestor-side association is unaffected by server handlers.
assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == []
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc.release()
scp.shutdown()
def test_bind_evt_conn_open(self):
"""Test associations as acceptor with EVT_CONN_OPEN bound."""
triggered_events = []
def on_conn_open(event):
triggered_events.append(event)
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
scp = ae.start_server(
('', 11112),
block=False,
evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
# Server-side child association inherits the server's handlers.
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assert len(triggered_events) == 1
event = triggered_events[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(event.address[0], str)
assert isinstance(event.address[1], int)
assert event.event.name == 'EVT_CONN_OPEN'
assoc.release()
scp.shutdown()
def test_bind_evt_conn_open_running(self):
"""Test binding EVT_CONN_OPEN while running."""
triggered_events = []
def on_conn_open(event):
triggered_events.append(event)
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == []
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assert len(scp.active_associations) == 1
assert len(triggered_events) == 0
# Bind
# (binding propagates to already-active child associations too).
scp.bind(evt.EVT_CONN_OPEN, on_conn_open)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc2 = ae.associate('localhost', 11112)
assert assoc2.is_established
assert len(scp.active_associations) == 2
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert assoc2.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc2.get_handlers(evt.EVT_CONN_CLOSE) == []
child2 = scp.active_associations[1]
assert child2.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert child2.get_handlers(evt.EVT_CONN_CLOSE) == []
# Only the second association (made after binding) fires the handler.
assert len(triggered_events) == 1
event = triggered_events[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(event.address[0], str)
assert isinstance(event.address[1], int)
assoc.release()
assoc2.release()
scp.shutdown()
def test_unbind_evt_conn_open(self):
"""Test unbinding an event while running."""
triggered_events = []
def on_conn_open(event):
triggered_events.append(event)
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
scp = ae.start_server(
('', 11112),
block=False,
evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assert len(triggered_events) == 1
event = triggered_events[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(event.address[0], str)
assert isinstance(event.address[1], int)
# Unbind
# (unbinding also propagates to active child associations).
scp.unbind(evt.EVT_CONN_OPEN, on_conn_open)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert child.get_handlers(evt.EVT_CONN_OPEN) == []
assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
assoc2 = ae.associate('localhost', 11112)
assert assoc2.is_established
assert len(scp.active_associations) == 2
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert assoc2.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc2.get_handlers(evt.EVT_CONN_CLOSE) == []
child2 = scp.active_associations[1]
assert child2.get_handlers(evt.EVT_CONN_OPEN) == []
assert child2.get_handlers(evt.EVT_CONN_CLOSE) == []
# Handler was not fired for the post-unbind association.
assert len(triggered_events) == 1
assoc.release()
assoc2.release()
scp.shutdown()
def test_unbind_no_event(self):
    """Unbinding a handler that was never bound is a harmless no-op."""
    def noop_handler(event):
        pass

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    server = ae.start_server(('', 11112), block=False)
    # Nothing bound yet; unbinding must neither raise nor alter state.
    assert server.get_handlers(evt.EVT_CONN_CLOSE) == []
    server.unbind(evt.EVT_CONN_CLOSE, noop_handler)
    assert server.get_handlers(evt.EVT_CONN_CLOSE) == []
    server.shutdown()
def test_unbind_last_handler(self):
    """Test unbinding the last handler removes the event entry."""
    def dummy(event):
        pass

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    scp.bind(evt.EVT_CONN_CLOSE, dummy)
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(dummy, None)]
    scp.unbind(evt.EVT_CONN_CLOSE, dummy)
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
    # Removing the final handler should also drop the event key itself.
    assert evt.EVT_CONN_CLOSE not in scp._handlers
    scp.shutdown()
def test_conn_open_raises(self, caplog):
    """Test the handler for EVT_CONN_OPEN raising exception."""
    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_CONN_OPEN, handle)]
    scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        # The handler's exception must be logged, not propagated to the
        # association machinery.
        msg = (
            "Exception raised in user's 'evt.EVT_CONN_OPEN' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_bind_evt_conn_close(self):
    """Test associations as acceptor with EVT_CONN_CLOSE bound."""
    triggered_events = []
    # Use a single shared lock. The original acquired `threading.Lock()`
    # inside the handler, which creates a brand-new lock on every call and
    # therefore provides no mutual exclusion at all.
    lock = threading.Lock()

    def on_conn_close(event):
        with lock:
            triggered_events.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(
        ('', 11112),
        block=False,
        evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
    )
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]

    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    # The requestor-side association has no handlers of its own.
    assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
    assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_CONN_OPEN) == []
    assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]

    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)

    # The close of the single connection fires the handler exactly once.
    assert len(triggered_events) == 1
    event = triggered_events[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.address[0], str)
    assert isinstance(event.address[1], int)
    assert event.event.name == 'EVT_CONN_CLOSE'

    scp.shutdown()
def test_bind_evt_conn_close_running(self):
    """Test binding EVT_CONN_CLOSE while running."""
    triggered_events = []
    def on_conn_close(event):
        triggered_events.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False,)
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
    # First association runs and closes with nothing bound.
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
    assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
    assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_CONN_OPEN) == []
    assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # Second association: bind the handler while it is active.
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    scp.bind(evt.EVT_CONN_CLOSE, on_conn_close)
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
    assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
    # Binding mid-run also reaches the already-established child.
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_CONN_OPEN) == []
    assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    assoc.release()
    assert assoc.is_released
    time.sleep(0.1)
    # Only the second connection's close was captured.
    assert len(triggered_events) == 1
    event = triggered_events[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    scp.shutdown()
def test_unbind_evt_conn_close(self):
    """Test unbinding EVT_CONN_CLOSE."""
    triggered_events = []
    def on_conn_close(event):
        triggered_events.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(
        ('', 11112),
        block=False,
        evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
    )
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
    assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_CONN_OPEN) == []
    assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
    # Unbind before the connection closes; the handler must not fire.
    scp.unbind(evt.EVT_CONN_CLOSE, on_conn_close)
    assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
    assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
    assert child.get_handlers(evt.EVT_CONN_OPEN) == []
    assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    assert len(triggered_events) == 0
    scp.shutdown()
def test_conn_close_raises(self, caplog):
    """An exception in an EVT_CONN_CLOSE handler is logged, not raised."""
    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(
        ('', 11112),
        block=False,
        evt_handlers=[(evt.EVT_CONN_CLOSE, handle)],
    )
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        # The exception is caught and logged by pynetdicom.
        expected = (
            "Exception raised in user's 'evt.EVT_CONN_CLOSE' event handler"
            " 'handle'"
        )
        assert expected in caplog.text
        assert "Exception description" in caplog.text
def test_data_sent(self):
    """Test binding to EVT_DATA_SENT."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_DATA_SENT, handle)]
    scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # The acceptor sends exactly two PDUs for an associate/release cycle.
    assert len(triggered) == 2
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert event.event.name == 'EVT_DATA_SENT'
    assert triggered[0].data[0:1] == b'\x02'  # A-ASSOCIATE-AC
    assert triggered[1].data[0:1] == b'\x06'  # A-RELEASE-RP
    scp.shutdown()
def test_data_sent_bind(self):
    """Test binding EVT_DATA_SENT to a running server."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    # NOTE: the original also built an unused `handlers` list here; the
    # handler is deliberately bound via `scp.bind()` below instead.
    scp = ae.start_server(('', 11112), block=False)
    assert scp.get_handlers(evt.EVT_DATA_SENT) == []
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    # Bound after the A-ASSOCIATE-AC was sent, so only the A-RELEASE-RP
    # should be captured below.
    scp.bind(evt.EVT_DATA_SENT, handle)
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert event.event.name == 'EVT_DATA_SENT'
    assert event.data[0:1] == b'\x06'  # A-RELEASE-RP
    scp.shutdown()
def test_data_sent_unbind(self):
    """Test unbinding EVT_DATA_SENT."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_DATA_SENT, handle)]
    scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
    # Unbind after the AC was sent but before release.
    scp.unbind(evt.EVT_DATA_SENT, handle)
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    time.sleep(0.1)
    # Only the PDU sent while still bound was captured.
    assert len(triggered) == 1
    assert triggered[0].data[0:1] == b'\x02'  # A-ASSOCIATE-AC
    scp.shutdown()
def test_data_sent_raises(self, caplog):
    """An exception in an EVT_DATA_SENT handler is logged, not raised."""
    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(
        ('', 11112),
        block=False,
        evt_handlers=[(evt.EVT_DATA_SENT, handle)],
    )
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        # The exception is caught and logged by pynetdicom.
        expected = (
            "Exception raised in user's 'evt.EVT_DATA_SENT' event handler"
            " 'handle'"
        )
        assert expected in caplog.text
        assert "Exception description" in caplog.text
def test_data_recv(self):
    """Test starting bound to EVT_DATA_RECV."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_DATA_RECV, handle)]
    scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # The acceptor receives exactly two PDUs: request then release.
    assert len(triggered) == 2
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert triggered[0].data[0:1] == b'\x01'  # Should be A-ASSOCIATE-RQ PDU
    assert triggered[1].data[0:1] == b'\x05'  # Should be A-RELEASE-RQ PDU
    assert event.event.name == 'EVT_DATA_RECV'
    scp.shutdown()
def test_data_recv_bind(self):
    """Test binding to EVT_DATA_RECV."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == []
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Bound after the A-ASSOCIATE-RQ was received, so only the
    # A-RELEASE-RQ should be captured below.
    scp.bind(evt.EVT_DATA_RECV, handle)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert event.data[0:1] == b'\x05'  # Should be A-RELEASE-RQ PDU
    assert event.event.name == 'EVT_DATA_RECV'
    scp.shutdown()
def test_data_recv_unbind(self):
    """Test unbinding to EVT_DATA_RECV."""
    triggered = []
    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_DATA_RECV, handle)]
    scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    # Unbind after the association request was received.
    scp.unbind(evt.EVT_DATA_RECV, handle)
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_RECV) == []
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # Only the PDU received while still bound was captured.
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert triggered[0].data[0:1] == b'\x01'  # Should be A-ASSOCIATE-RQ PDU
    assert event.event.name == 'EVT_DATA_RECV'
    scp.shutdown()
def test_data_recv_raises(self, caplog):
    """An exception in an EVT_DATA_RECV handler is logged, not raised."""
    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(
        ('', 11112),
        block=False,
        evt_handlers=[(evt.EVT_DATA_RECV, handle)],
    )
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        # The exception is caught and logged by pynetdicom.
        expected = (
            "Exception raised in user's 'evt.EVT_DATA_RECV' event handler"
            " 'handle'"
        )
        assert expected in caplog.text
        assert "Exception description" in caplog.text
class TestEventHandlingRequestor(object):
    """Test the transport events and handling as requestor."""
    def setup(self):
        """Start each test with no AE instance."""
        self.ae = None

    def teardown(self):
        """Shut down any AE left running by a test."""
        if self.ae:
            self.ae.shutdown()

    def test_no_handlers(self):
        """Test associations as requestor with no handlers bound."""
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.release()
        scp.shutdown()

    def test_bind_evt_conn_open(self):
        """Test start with a bound EVT_CONN_OPEN"""
        triggered_events = []
        def on_conn_open(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate(
            'localhost', 11112,
            evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
        )
        assert assoc.is_established
        # Handler is bound on the requestor only, not the acceptor's child.
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Opening the connection fired the handler exactly once.
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)
        assoc.release()
        scp.shutdown()

    def test_unbind_evt_conn_open(self):
        """Test unbinding EVT_CONN_OPEN"""
        triggered_events = []
        def on_conn_open(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate(
            'localhost', 11112,
            evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.unbind(evt.EVT_CONN_OPEN, on_conn_open)
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        # The open event already fired before unbinding.
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)
        assoc.release()
        scp.shutdown()

    def test_bind_evt_conn_close(self):
        """Test start with a bound EVT_CONN_CLOSED"""
        triggered_events = []
        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate(
            'localhost', 11112,
            evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Nothing fires until the connection is actually closed.
        assert len(triggered_events) == 0
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        scp.shutdown()

    def test_bind_evt_conn_close_running(self):
        """Test binding EVT_CONN_CLOSED after assoc running."""
        triggered_events = []
        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert len(triggered_events) == 0
        # Bind while the association is already established.
        assoc.bind(evt.EVT_CONN_CLOSE, on_conn_close)
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        scp.shutdown()

    def test_unbind_evt_conn_close(self):
        """Test unbinding EVT_CONN_CLOSED"""
        triggered_events = []
        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate(
            'localhost', 11112,
            evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Unbind before the connection closes; the handler must not fire.
        assoc.unbind(evt.EVT_CONN_CLOSE, on_conn_close)
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered_events) == 0
        scp.shutdown()

    def test_connection_failure_log(self, caplog):
        """Test that a connection failure is logged."""
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        with caplog.at_level(logging.ERROR, logger='pynetdicom'):
            assoc = ae.associate('unknown', 11112)
            assert assoc.is_aborted
            # NOTE(review): "Connection refused" wording is OS/libc
            # specific; this assertion may be flaky off Linux - confirm.
            messages = [
                "Association request failed: unable to connect to remote",
                "TCP Initialisation Error: Connection refused"
            ]
            for msg in messages:
                assert msg in caplog.text

        scp.shutdown()

    def test_data_sent(self):
        """Test binding to EVT_DATA_SENT."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # The requestor sends exactly two PDUs: request then release.
        assert len(triggered) == 2
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == 'EVT_DATA_SENT'
        assert triggered[0].data[0:1] == b'\x01'  # A-ASSOCIATE-RQ
        assert triggered[1].data[0:1] == b'\x05'  # A-RELEASE-RQ
        scp.shutdown()

    def test_data_sent_bind(self):
        """Test binding EVT_DATA_SENT to a running association."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        # NOTE: the original also built an unused `handlers` list here;
        # the handler is deliberately bound via `assoc.bind()` below.
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        # Bound after the A-ASSOCIATE-RQ was sent, so only the
        # A-RELEASE-RQ should be captured below.
        assoc.bind(evt.EVT_DATA_SENT, handle)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == 'EVT_DATA_SENT'
        assert event.data[0:1] == b'\x05'  # A-RELEASE-RQ
        scp.shutdown()

    def test_data_sent_unbind(self):
        """Test unbinding EVT_DATA_SENT."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        # Unbind after the request was sent but before release.
        assoc.unbind(evt.EVT_DATA_SENT, handle)
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # Only the PDU sent while still bound was captured.
        assert len(triggered) == 1
        assert triggered[0].data[0:1] == b'\x01'  # A-ASSOCIATE-RQ
        scp.shutdown()

    def test_data_recv(self):
        """Test starting bound to EVT_DATA_RECV."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_DATA_RECV, handle)]
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # The requestor receives exactly two PDUs: accept then release.
        assert len(triggered) == 2
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert triggered[0].data[0:1] == b'\x02'  # Should be A-ASSOCIATE-AC PDU
        assert triggered[1].data[0:1] == b'\x06'  # Should be A-RELEASE-RP PDU
        assert event.event.name == 'EVT_DATA_RECV'
        scp.shutdown()

    def test_data_recv_bind(self):
        """Test binding to EVT_DATA_RECV."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        # Bound after the A-ASSOCIATE-AC was received, so only the
        # A-RELEASE-RP should be captured below.
        assoc.bind(evt.EVT_DATA_RECV, handle)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.data[0:1] == b'\x06'  # Should be A-RELEASE-RP PDU
        assert event.event.name == 'EVT_DATA_RECV'
        scp.shutdown()

    def test_data_recv_unbind(self):
        """Test unbinding to EVT_DATA_RECV."""
        triggered = []
        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_DATA_RECV, handle)]
        scp = ae.start_server(('', 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        # Unbind after the accept was received but before release.
        assoc.unbind(evt.EVT_DATA_RECV, handle)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # Only the PDU received while still bound was captured.
        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert triggered[0].data[0:1] == b'\x02'  # Should be A-ASSOCIATE-AC PDU
        assert event.event.name == 'EVT_DATA_RECV'
        scp.shutdown()
|
concurrency_test.py | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for concurrency."""
import threading
import time
from magenta.common import concurrency
import tensorflow.compat.v1 as tf
class ConcurrencyTest(tf.test.TestCase):
  """Tests for the concurrency.Sleeper helper."""

  def testSleeper_SleepUntil(self):
    """sleep_until should wake within ~5ms of the requested time."""
    # Burn in.
    for _ in range(10):
      concurrency.Sleeper().sleep(.01)

    future_time = time.time() + 0.5
    concurrency.Sleeper().sleep_until(future_time)
    self.assertAlmostEqual(time.time(), future_time, delta=0.005)

  def testSleeper_Sleep(self):
    """sleep should stay accurate when many threads sleep concurrently."""
    # Burn in.
    for _ in range(10):
      concurrency.Sleeper().sleep(.01)

    # Assertion errors raised inside worker threads do not propagate to
    # the test runner (threading swallows them), so record failures and
    # re-raise on the main thread after joining.
    failures = []

    def sleep_test_thread(duration):
      try:
        start_time = time.time()
        concurrency.Sleeper().sleep(duration)
        self.assertAlmostEqual(
            time.time(), start_time + duration, delta=0.005)
      except Exception as e:  # pylint: disable=broad-except
        failures.append(e)

    threads = [threading.Thread(target=sleep_test_thread, args=[i * 0.1])
               for i in range(10)]
    for t in threads:
      t.start()
    for t in threads:
      t.join()
    if failures:
      raise failures[0]
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
|
eval1024Old.py | # System libs
import os
import argparse
from distutils.version import LooseVersion
from multiprocessing import Queue, Process
# Numerical libs
import numpy as np
import math
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from config import cfg
from dataset1024 import ValDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices, setup_logger
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
colors = loadmat('data/color150.mat')['colors']
def visualize_result(data, pred, dir_result):
    """Save a side-by-side visualization: input image | ground truth | prediction."""
    img, seg, info = data

    # Color-encode ground truth and prediction with the shared palette.
    seg_color = colorEncode(seg, colors)
    pred_color = colorEncode(pred, colors)

    # Stitch the three panels horizontally and write a PNG named after the input.
    im_vis = np.concatenate((img, seg_color, pred_color), axis=1).astype(np.uint8)
    img_name = info.split('/')[-1]
    out_path = os.path.join(dir_result, img_name.replace('.jpg', '.png'))
    Image.fromarray(im_vis).save(out_path)
def evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):
    """Run validation on one GPU and push per-image metrics to the master.

    For each sample the scores over all resized versions of the image are
    averaged, the argmax prediction is compared to the ground-truth labels,
    and an (accuracy, pixel_count, intersection, union) tuple is sent
    through ``result_queue`` for aggregation by the parent process.
    """
    segmentation_module.eval()

    for batch_data in loader:
        # process data
        # user_scattered_collate yields a list; take its first entry
        # (presumably one sample per batch here — TODO confirm against loader config).
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu_id)

            # Average class scores over every resized version of the image.
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                # Drop fields the network does not consume before copying to GPU.
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu_id)

                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        # calculate accuracy and SEND THEM TO MASTER
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
        result_queue.put_nowait((acc, pix, intersection, union))

        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred,
                os.path.join(cfg.DIR, 'result')
            )
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
    """Per-GPU evaluation worker.

    Binds to ``gpu_id``, loads the validation slice [start_idx, end_idx),
    builds the encoder/decoder model from the config, and runs
    :func:`evaluate`, reporting metrics back through ``result_queue``.
    """
    torch.cuda.set_device(gpu_id)

    # Dataset and Loader
    dataset_val = ValDataset(
        cfg.DATASET.root_dataset,
        cfg.DATASET.list_val,
        cfg.DATASET,
        start_idx=start_idx, end_idx=end_idx)
    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)  # inference mode: decoder emits softmax probabilities

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, cfg, gpu_id, result_queue)
def main(cfg, gpus):
    """Spawn one evaluation worker per GPU and aggregate their metrics.

    Splits the validation list evenly across ``gpus``, collects
    (acc, pix, intersection, union) tuples from the shared queue, and
    prints per-class IoU plus overall mean IoU / accuracy.
    """
    with open(cfg.DATASET.list_val, 'r') as f:
        lines = f.readlines()
    num_files = len(lines)

    num_files_per_gpu = math.ceil(num_files / len(gpus))

    pbar = tqdm(total=num_files)

    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    result_queue = Queue(500)
    procs = []
    for idx, gpu_id in enumerate(gpus):
        start_idx = idx * num_files_per_gpu
        end_idx = min(start_idx + num_files_per_gpu, num_files)
        proc = Process(target=worker, args=(cfg, gpu_id, start_idx, end_idx, result_queue))
        print('gpu:{}, start_idx:{}, end_idx:{}'.format(gpu_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results
    # Fix: use a blocking get() instead of spinning on queue.empty(),
    # which busy-waited a full CPU core while workers were computing.
    processed_counter = 0
    while processed_counter < num_files:
        (acc, pix, intersection, union) = result_queue.get()
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        processed_counter += 1
        pbar.update(1)

    for p in procs:
        p.join()

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'
          .format(iou.mean(), acc_meter.average()*100))
    print('Evaluation Done!')
if __name__ == '__main__':
    # Require a PyTorch version with the no_grad / tensor semantics used above.
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'

    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Validation"
    )
    parser.add_argument(
        "--cfg",
        default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--gpus",
        default="0-3",
        help="gpus to use, e.g. 0-3 or 0,1,2,3"
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    logger = setup_logger(distributed_rank=0)  # TODO
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(
        cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(
        cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)
    # Fix: corrected typo in the assertion message ("exitst" -> "exist").
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    if not os.path.isdir(os.path.join(cfg.DIR, "result")):
        os.makedirs(os.path.join(cfg.DIR, "result"))

    # Parse gpu ids: parse_devices yields names like "gpu0"; strip to ints.
    gpus = parse_devices(args.gpus)
    gpus = [x.replace('gpu', '') for x in gpus]
    gpus = [int(x) for x in gpus]

    main(cfg, gpus)
|
game.py | import threading
import copy
import time
import random
import sys
import multiprocessing as mp
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
FREE = 0
WALL = 1
# Game state
STOPPED = 0
STARTED = 1
GAMEOVER = 2
class GameError(Exception):
    """Raised for invalid operations on a Game (e.g. adding a player mid-game)."""
    pass
class PlaygroundError(Exception):
    """Raised for invalid Playground operations (bad map, bad point placement)."""
    pass
class Playground(object):
    """Rectangular game board: a grid of FREE/WALL cells plus collectible points.

    ``map`` is a list of rows (map[y][x]); every row must have the same
    length. Collectible points are tracked as (x, y) tuples.
    """

    FREE = 0
    WALL = 1

    def __init__(self, map):
        # Validate the map shape before accepting it.
        if len(map) == 0:
            # Fix: corrected error message ("has not rows" -> "has no rows").
            raise PlaygroundError("Map has no rows")
        for row in map:
            if len(row) != len(map[0]):
                # Fix: corrected typo in message ("nave" -> "have").
                raise PlaygroundError("Rows in map have not got the same length")
        self.map = map
        self.width = len(map[0])
        self.height = len(map)
        self.points = []  # collectible point coordinates as (x, y) tuples

    def add_point(self, x, y):
        """Place a point on a free cell that has no point yet; raise otherwise."""
        if not self.is_free(x, y) or self.is_point(x, y):
            raise PlaygroundError("Can not add point ({0}, {1})".format(x, y))
        self.points.append((x, y))

    def del_point(self, x, y):
        """Remove the point at (x, y); raise if there is none."""
        if self.is_point(x, y):
            self.points.remove((x, y))
        else:
            raise PlaygroundError("Can not remove point ({0}, {1})".format(x, y))

    def is_point(self, x, y):
        """True if a point lies at (x, y) inside the board."""
        if not self.is_into(x, y):
            return False
        return (x, y) in self.points

    def is_free(self, x, y):
        """True if (x, y) is inside the board and not a wall."""
        if not self.is_into(x, y):
            return False
        # Fix: use the class's own WALL constant instead of the module-level
        # one, so the class is self-contained (both are 1, behavior unchanged).
        return self.map[y][x] != self.WALL

    def is_into(self, x, y):
        """True if (x, y) lies within the board bounds."""
        return 0 <= x < self.width and 0 <= y < self.height
class PlayerInfo(object):
    """Immutable-by-convention snapshot of a player's public state."""

    def __init__(self, player):
        # Copy plain values so the snapshot stays valid while the game
        # keeps mutating the live Player object.
        self.name = player.name
        self.score = player.score
        self.x, self.y = player.x, player.y
        self.timeout = player.timeout
class Player(object):
    """A game participant whose moves are computed in a separate process.

    The move function runs in its own process and talks to the game over a
    multiprocessing pipe, so a slow or hung player cannot block the game
    loop. (This file uses Python 2 print-statement syntax.)
    """

    def __init__(self, name, move_func, x=0, y=0):
        self.name = name
        self.score = 0
        # Number of ticks missed because the previous request was still pending.
        self.timeout = 0
        self.move_func = move_func
        # Function for move:
        # move_function(info, ctx = None)
        self.x = x
        self.y = y
        # Save history of all moves
        self.history = []
        self.history.append((x, y))
        # game_pipe end is used by the game; player_pipe end by the child process.
        self.game_pipe, self.player_pipe = mp.Pipe()
        self.process = None
        # Are there any pending move requests
        self.move_in_progress = False
        self.n_move = 0

    def set_position(self, x, y):
        self.x = x
        self.y = y

    def get_position(self):
        return (self.x, self.y)

    def start_player(self):
        # Launch the move-processing loop in a child process.
        self.process = mp.Process(target=self.move_processor)
        self.process.start()

    def stop_player(self, timeout=5):
        """Stop the player's process: politely first (None request), then SIGTERM."""
        if self.process and self.process.is_alive():
            # Try to terminate process normally
            self.game_pipe.send(None)
            self.process.join(timeout)
            # Send SIGTERM to the process
            if self.process.is_alive():
                self.process.terminate()
                self.process.join(timeout)

    def move_processor(self):
        """Child-process loop: receive game info, run move_func, send the move back.

        Exits on a None request, on pipe failure, or if move_func raises.
        """
        print "Process '{}' started".format(self.name)
        self.ctx = {}  # per-process context dict passed to every move_func call
        while True:
            try:
                #print('in')
                request = self.player_pipe.recv()
                #print('out')
            except Exception as e:
                print "ERROR. Process '{}' on pipe receive. {}.".format(self.name, e)
                break
            if request is None:
                break
            try:
                response = self.move_func(request, self.ctx)
            except Exception as e:
                print "ERROR. Process '{}' on move function. {}.".format(self.name, e)
                break
            try:
                self.player_pipe.send(response)
            except Exception as e:
                print "ERROR. Process '{}' on pipe send. {}.".format(self.name, e)
                break
        print "Process {} stopped".format(self.name)

    def move_request(self, gameinfo):
        """Ask the player process for its next move (non-blocking)."""
        self.n_move += 1
        if self.move_in_progress:
            # Previous request still unanswered: count a timeout and skip this tick.
            self.timeout += 1
            return
        self.game_pipe.send(gameinfo)
        self.move_in_progress = True

    def move_result(self):
        """Return the player's answer if one has arrived, else None."""
        if self.move_in_progress:
            if self.game_pipe.poll():
                self.move_in_progress = False
                return self.game_pipe.recv()
        return None
class Game(object):
    """Coordinates players on a playground: each tick, collect and apply moves."""

    def __init__(self, playground, max_move, movetime=1):
        self.state = STOPPED
        self.playground = playground
        self.movetime = movetime      # seconds players get to answer each tick
        self.max_move = max_move      # game ends after this many ticks
        self.n_move = 0
        self.players = {}             # player name -> Player
        self.lock = threading.Lock()  # guards state shared with the getter methods
        self.game_thread = threading.Thread(target=self._game)
        self.stop = False             # set by stop_game() to end the loop

    def add_player(self, player):
        """Register a player; only allowed before the game starts."""
        if self.state == STOPPED:
            self.players[player.name] = player
        else:
            raise GameError("Can not add player. Game not in STOPPED state")

    def do_player_move(self, player, move, start_game=False):
        """Apply one player's move; collect a point if one sits on the new cell.

        With start_game=True the point-collection step runs even for a
        move value outside UP/DOWN/LEFT/RIGHT.
        """
        x, y = player.get_position()
        if move == UP:
            y -= 1
        elif move == DOWN:
            y += 1
        elif move == LEFT:
            x -= 1
        elif move == RIGHT:
            x += 1
        elif not start_game:
            return
        self.lock.acquire()
        if self.playground.is_free(x, y):
            player.set_position(x, y)
            if self.playground.is_point(x, y):
                self.playground.del_point(x, y)
                player.score += 1
            if not start_game:
                player.history.append((x, y))
        self.lock.release()

    def do_move(self):
        """One game tick: request moves from all players, wait, apply answers."""
        self.n_move += 1
        l_players = self.players.values()
        # Randomize move order so no player has a fixed turn-order advantage.
        random.shuffle(l_players)
        for player in l_players:
            info = self.player_info(player.name)
            player.move_request(info)
        # Every player gets the same wall-clock budget to answer.
        time.sleep(self.movetime)
        for player in l_players:
            move = player.move_result()
            if move is not None:
                self.do_player_move(player, move)

    def start_game(self):
        """Start all player processes and the game-loop thread."""
        for player in self.players.values():
            player.start_player()
            # Collect coins at starting game
            # NOTE(review): move value 0 equals UP, so a player whose
            # upper cell is free will shift up here — verify this is
            # intended rather than a no-op "collect only" move.
            self.do_player_move(player, 0, start_game=True)
        self.game_thread.start()
        self.state = STARTED

    def stop_game(self):
        """Stop the game thread, then terminate every player process."""
        # Stop game thread
        self.stop = True
        #sys.exit
        self.game_thread.join()
        # Stop all players
        for player in self.players.values():
            player.stop_player()

    def _game(self):
        # Game-loop thread body: tick until the game-over condition holds.
        while True:
            self.do_move()
            if self.is_gameover():
                self.state = GAMEOVER
                break

    def is_gameover(self):
        """True when all points are taken, the move limit is hit, or stop was requested."""
        if len(self.playground.points) == 0 or self.n_move >= self.max_move or self.stop:
            return True
        return False

    #def is_going(self):
    #return self.game_thread.is_alive()

    def player_info(self, player_name):
        """Build the observation dict sent to a player's move function."""
        info = dict()
        # Deep-copy mutable state so the player process cannot corrupt the game.
        info["map"] = copy.deepcopy(self.playground.map)
        info["coins"] = copy.deepcopy(self.playground.points)
        info["players"] = [(p.x, p.y) for p in self.players.values() if p.name != player_name]
        info["x"] = self.players[player_name].x
        info["y"] = self.players[player_name].y
        info["score"] = dict()
        for p in self.players.values():
            info["score"][p.name] = p.score
        return info

    def get_points(self):
        """Thread-safe snapshot of the remaining points."""
        self.lock.acquire()
        points = copy.deepcopy(self.playground.points)
        self.lock.release()
        return points

    def get_players(self):
        """Thread-safe snapshots of all players."""
        self.lock.acquire()
        players = [PlayerInfo(p) for p in self.players.values()]
        self.lock.release()
        return players

    def get_gameinfo(self):
        # Lightweight progress info for spectators / UI.
        info = {
            "move": self.n_move,
            "max_move": self.max_move
        }
        return info
manager.py | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
from typing import List, Tuple, Union
import cereal.messaging as messaging
import selfdrive.sentry as sentry
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init() -> None:
    """One-time manager setup.

    Updates system time from the panda, clears volatile params, seeds
    default param values, records version/git metadata, registers the
    device for a dongle ID, and initialises sentry/cloudlog.
    """
    # update system time from panda
    set_time(cloudlog)

    # save boot log
    #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

    params = Params()
    params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)

    # Fix: removed duplicate ("PutPrebuilt", "0") and
    # ("StockNaviDecelEnabled", "0") entries — each key needs one default.
    default_params: List[Tuple[str, Union[str, bytes]]] = [
        ("CompletedTrainingVersion", "0"),
        ("HasAcceptedTerms", "0"),
        ("OpenpilotEnabledToggle", "1"),
        ("IsMetric", "1"),

        # HKG
        ("UseClusterSpeed", "0"),
        ("LongControlEnabled", "0"),
        ("MadModeEnabled", "0"),
        ("IsLdwsCar", "0"),
        ("LaneChangeEnabled", "1"),
        ("AutoLaneChangeEnabled", "1"),
        ("SccSmootherSlowOnCurves", "0"),
        ("SccSmootherSyncGasPressed", "0"),
        ("StockNaviDecelEnabled", "0"),
        ("KeepSteeringTurnSignals", "0"),
        ("NoMinLaneChangeSpeed", "0"),
        ("TurnSignals", "0"),
        ("ShowDebugUI", "0"),
        ("UseSMDPSHarness", "0"),
        ("SSCOD", "0"),
        ("DisableUpdates", "0"),
        ("LoggerEnabled", "0"),
        ("CleanUI", "1"),
        ("AR", "0"),
        ("UseLQR", "0"),
        ("PutPrebuilt", "0"),
        ("TPMS_Alerts", "1"),
        ("CustomLeadMark", "0"),
        ("HyundaiNaviSL", "0"),
        ("HapticFeedbackWhenSpeedCamera", "0"),
        ("DisableOpFcw", "0"),
        ("NewRadarInterface", "0"),
        ("LowSpeedAlerts", "1"),
        ("SteerLockout", "0"),
        ("SpasRspaEnabled", "0"),
        ("DynamicSpas", "0"),
        ("SPASDebug", "0"),
    ]
    if not PC:
        default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))

    if params.get_bool("RecordFrontLock"):
        params.put_bool("RecordFront", True)

    # set unset params
    for k, v in default_params:
        if params.get(k) is None:
            params.put(k, v)

    # is this dashcam?
    if os.getenv("PASSIVE") is not None:
        params.put_bool("Passive", bool(int(os.getenv("PASSIVE", "0"))))

    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")

    # Create folders needed for msgq
    try:
        os.mkdir("/dev/shm")
    except FileExistsError:
        pass
    except PermissionError:
        print("WARNING: failed to make /dev/shm")

    # set version params
    params.put("Version", get_version())
    params.put("TermsVersion", terms_version)
    params.put("TrainingVersion", training_version)
    params.put("GitCommit", get_commit(default=""))
    params.put("GitBranch", get_short_branch(default=""))
    params.put("GitRemote", get_origin(default=""))

    # set dongle id
    reg_res = register(show_spinner=True)
    if reg_res:
        dongle_id = reg_res
    else:
        serial = params.get("HardwareSerial")
        raise Exception(f"Registration failed for device {serial}")
    os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog

    if not is_dirty():
        os.environ['CLEAN'] = '1'

    # init logging
    sentry.init(sentry.SentryProject.SELFDRIVE)
    cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
                         device=HARDWARE.get_device_type())
def manager_prepare() -> None:
    """Run every managed process's prepare step before anything is started."""
    for proc in managed_processes.values():
        proc.prepare()
def manager_cleanup() -> None:
    """Stop all managed processes: signal everyone first, then wait on each."""
    # First pass is non-blocking so all processes shut down in parallel.
    for proc in managed_processes.values():
        proc.stop(block=False)

    # Second pass blocks until each process is actually gone.
    for proc in managed_processes.values():
        proc.stop(block=True)

    cloudlog.info("everything is dead")
def manager_thread() -> None:
    """Main supervision loop.

    Keeps managed processes matched to the car's started/driverview state,
    publishes managerState, and exits when an uninstall/shutdown/reboot
    param is set.
    """
    if EON:
        Process(name="autoshutdownd", target=launcher, args=("selfdrive.autoshutdownd", "autoshutdownd")).start()
        system("am startservice com.neokii.optool/.MainService")
        Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter", "road_speed_limiter")).start()

    cloudlog.bind(daemon="manager")
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    params = Params()

    ignore: List[str] = []
    # Without a registered dongle ID there is nothing to authenticate/upload.
    if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    # BLOCK env var allows blacklisting processes by name (comma separated).
    ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]

    ensure_running(managed_processes.values(), started=False, not_run=ignore)

    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])

    while True:
        sm.update()
        not_run = ignore[:]

        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview, not_run)

        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)

        started_prev = started

        # Green name = alive, red = dead (ANSI colors).
        running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                           for p in managed_processes.values() if p.proc)
        print(running)
        cloudlog.debug(running)

        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
        pm.send('managerState', msg)

        # Exit main loop when uninstall/shutdown/reboot is needed
        shutdown = False
        for param in ("DoUninstall", "DoShutdown", "DoReboot"):
            if params.get_bool(param):
                shutdown = True
                params.put("LastManagerExitReason", param)
                cloudlog.warning(f"Shutting down manager - {param} set")

        if shutdown:
            break
def main() -> None:
    """Entry point: init, optional prepare-only mode, supervision loop, cleanup."""
    prepare_only = os.getenv("PREPAREONLY") is not None

    manager_init()

    # Start UI early so prepare can happen in the background
    if not prepare_only:
        managed_processes['ui'].start()

    manager_prepare()

    if prepare_only:
        return

    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        sentry.capture_exception()
    finally:
        manager_cleanup()

    # Honor whichever request ended the supervision loop.
    params = Params()
    if params.get_bool("DoUninstall"):
        cloudlog.warning("uninstalling")
        HARDWARE.uninstall()
    elif params.get_bool("DoReboot"):
        cloudlog.warning("reboot")
        HARDWARE.reboot()
    elif params.get_bool("DoShutdown"):
        cloudlog.warning("shutdown")
        HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
try:
managed_processes['ui'].stop()
except Exception:
pass
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
cloud.py | # -*- coding: utf-8 -*-
# pylint: disable=broad-except, bare-except
import json
import time
import urllib
import threading
from datetime import datetime, timedelta
from .compat import s
from .const import LOGGER
try:
import http.client as httplib
except:
import httplib
class Cloud():
    """Client for the Shelly cloud HTTP API.

    A background thread periodically refreshes the cached device and room
    lists; display names are resolved from those caches.
    """

    def __init__(self, root, server, key):
        self.auth_key = key
        self.server = server.replace("https://", "")
        self._last_update = None
        self._stopped = threading.Event()
        self.update_interval = timedelta(minutes=1)
        self._device_list = None
        self._room_list = None
        self._cloud_thread = None
        self._last_post = datetime.now()
        self._root = root
        # Serializes HTTP requests; the cloud API is rate limited (see _post).
        self.http_lock = threading.Lock()

    def start(self):
        """Start the background update thread (daemonized)."""
        self._cloud_thread = threading.Thread(target=self._update_loop)
        self._cloud_thread.name = "Cloud"
        self._cloud_thread.daemon = True
        self._cloud_thread.start()

    def stop(self):
        """Signal the background update thread to exit."""
        self._stopped.set()

    def _update_loop(self):
        # Refresh device/room caches once per update_interval until stopped.
        while not self._stopped.is_set():  # fix: is_set() instead of deprecated isSet()
            try:
                if self._last_update is None or \
                        datetime.now() - self._last_update \
                        > self.update_interval:
                    self._last_update = datetime.now()
                    LOGGER.debug("Update from cloud")
                    devices = self.get_device_list()
                    if devices:
                        self._device_list = devices
                    rooms = self.get_room_list()
                    if rooms:
                        self._room_list = rooms
                else:
                    time.sleep(5)
            except Exception as ex:
                LOGGER.error("Error update cloud, %s", ex)

    def _post(self, path, params=None, retry=0):
        """POST to the cloud API; return the decoded JSON body or None on failure."""
        with self.http_lock:
            # Rate limit: at most one request every 2 seconds.
            while datetime.now() - self._last_post < timedelta(seconds=2):
                time.sleep(1)
            self._last_post = datetime.now()
            json_body = None
            params = params or {}
            # Fix: bind conn before try so the finally-clause cannot hit an
            # unbound name if the connection constructor itself raises.
            conn = None
            try:
                LOGGER.debug("POST to Shelly Cloud")
                conn = httplib.HTTPSConnection(self.server, timeout=5)
                headers = {'Content-Type' : 'application/x-www-form-urlencoded'}
                params["auth_key"] = self.auth_key
                conn.request("POST", "/" + path, urllib.parse.urlencode(params),
                             headers)
                resp = conn.getresponse()
                if resp.status == 200:
                    body = resp.read()
                    json_body = json.loads(s(body))
                else:
                    if retry < 2:
                        return self._post(path, params, retry + 1)
                    else:
                        LOGGER.warning("Error receive JSON from cloud, %s : %s", \
                                       resp.reason, resp.read())
            except Exception as ex:
                LOGGER.warning("Error connect cloud, %s", ex)
            finally:
                if conn:
                    conn.close()
            return json_body

    def get_device_name(self, _id):
        """Return name using template for device, or None if unknown."""
        if self._device_list and _id in self._device_list:
            dev = self._device_list[_id]
            name = dev['name']
            room = ""
            try:
                room_id = dev['room_id']
                if room_id == '-10':
                    room = '[Hidden]'
                elif room_id in self._room_list:
                    room = self._room_list[room_id]['name']
                else:
                    room = str(room_id)
            except Exception:  # fix: no bare except; room stays "" on failure
                pass
            tmpl = self._root.tmpl_name
            # Fix: format with the device id argument, not the builtin id().
            value = tmpl.format(id=_id, name=name, room=room)
            return value
        return None

    def get_room_name(self, _id):
        """Return room name of a device, or None if unknown."""
        room = None
        if self._device_list and _id in self._device_list:
            dev = self._device_list[_id]
            try:
                room_id = dev['room_id']
                if room_id == '-10':
                    room = '[Hidden]'
                elif room_id in self._room_list:
                    room = self._room_list[room_id]['name']
                else:
                    room = str(room_id)
            except Exception:  # fix: no bare except
                pass
        return room

    def get_device_list(self):
        """Return the device dict from the cloud, or None if the request failed."""
        resp = self._post("interface/device/list")
        # Fix: a failed _post returns None; don't crash with a TypeError.
        if not resp:
            return None
        return resp['data']['devices']

    def get_status(self):
        """Fire a status request for all devices (response is not used here)."""
        self._post("device/all_status")

    def get_room_list(self):
        """Return the room dict from the cloud, or None if the request failed."""
        resp = self._post("interface/room/list")
        if not resp:
            return None
        return resp['data']['rooms']

    def get_list_xxx(self):
        rooms = self.get_room_list()
        time.sleep(2)
        resp = self._post("interface/device/list")
        # NOTE(review): the return inside the loop exits on the first
        # iteration, and room_id is unused — this looks like scratch code;
        # behavior kept as-is pending clarification.
        for _id, device in resp['data']['devices'].items():
            room_id = device['room_id']
            return resp['data']['devices']
|
query.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import annotations
import datetime
import itertools
import multiprocessing
from pathlib import Path
import aiida.common
import aiida.engine
import aiida.manage.configuration
import aiida.orm
from .data import PyRemoteArray, PyRemoteData
from .workflow import PyWorkChain
def workflows() -> aiida.orm.QueryBuilder:
    """Return an Aiida database query that will return all workflows."""
    builder = aiida.orm.QueryBuilder()
    builder.append(cls=PyWorkChain, tag="flow")
    # Most recently created workflows come first.
    newest_first = {"flow": [{"ctime": {"order": "desc"}}]}
    builder.order_by(newest_first)
    return builder
def running_workflows() -> aiida.orm.QueryBuilder:
    """Return an Aiida database query that will return all running workflows."""
    query = workflows()
    # "Running" covers both actively executing and waiting process states.
    active_states = [
        aiida.engine.ProcessState.RUNNING.value,
        aiida.engine.ProcessState.WAITING.value,
    ]
    query.add_filter("flow", {"attributes.process_state": {"in": active_states}})
    return query
def recent_workflows(
    days: int = 0, hours: int = 0, minutes: int = 0
) -> aiida.orm.QueryBuilder:
    """Return an Aiida database query for all recently started workflows.

    Parameters
    ----------
    days, hours, minutes
        Any workflows started more recently than this many days/minutes/hours
        will be included in the result of the query.
    """
    cutoff = aiida.common.timezone.now() - datetime.timedelta(
        days=days, hours=hours, minutes=minutes
    )
    query = workflows()
    query.add_filter("flow", {"ctime": {">": cutoff}})
    return query
def remote_files(
    profile: str | None = None,
    root: str | Path | None = None,
) -> set[Path]:
    """Return the paths of all RemoteData for the given profile.

    Parameters
    ----------
    profile
        The profile name for which to return the UUIDs.
        If not provided, runs on the currently loaded profile.
    root
        If provided, return only sub-paths of this root path.

    Notes
    -----
    As Paths are returned without any information about what computer
    the path refers to, this function is only useful in environments
    where the Paths are globally unique.
    """
    if profile:
        aiida.load_profile(profile)
    # PyRemoteData and PyRemoteArray are not in the 'data.core.remote'
    # plugin path, so 'query.append' does not include them when querying
    # for 'aiida.orm.RemoteData', despite the fact that they do subclass it.
    remote_data = [aiida.orm.RemoteData, PyRemoteArray, PyRemoteData]
    query = aiida.orm.QueryBuilder()
    query.append(cls=remote_data, project="attributes.remote_path", tag="files")
    if root:
        root = Path(root).absolute()
        query.add_filter("files", {"attributes.remote_path": {"like": f"{root}%"}})
    # Each row from iterall() is a 1-tuple holding the remote path string.
    return {Path(p) for p, in query.iterall()}
# Needs to be importable to be used with multiprocessing in 'referenced_remote_files'
def _run_on_q(f, q, *args):
try:
r = f(*args)
except Exception as e:
q.put(("error", e))
else:
q.put(("ok", r))
def referenced_remote_files(root: str | Path | None = None) -> set[Path]:
    """Return the paths of all RemoteData for all profiles.

    Parameters
    ----------
    root
        If provided, return only sub-paths of this root path.

    Notes
    -----
    As Paths are returned without any information about what computer
    the path refers to, this function is only useful in environments
    where the Paths are globally unique.
    """
    # Loading different AiiDA profiles requires starting a fresh Python interpreter.
    # For this reason we cannot use concurrent.futures, and must use bare
    # multiprocessing.
    # TODO: revisit whether this is necessary when AiiDA 2.0 is released
    ctx = multiprocessing.get_context("spawn")
    q = ctx.Queue()
    profiles = aiida.manage.configuration.get_config().profile_names
    # One child process per profile; each runs remote_files(profile, root).
    procs = [
        ctx.Process(target=_run_on_q, args=(remote_files, q, p, root)) for p in profiles
    ]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()
    # Drain the queue: one ("ok" | "error", payload) tuple per profile.
    results = [q.get() for _ in range(q.qsize())]
    if errors := [e for status, e in results if status != "ok"]:
        raise ValueError(f"One or more processes errored: {errors}")
    return set(itertools.chain.from_iterable(r for _, r in results))
def referenced_work_directories(root: str | Path) -> set[Path]:
    """Return all calcjob working directories referenced in the AiiDA database.

    Notes
    -----
    As Paths are returned without any information about what computer
    the path refers to, this function is only useful in environments
    where the Paths are globally unique.
    """
    base = Path(root).absolute()
    # AiiDA shards working directory paths like '/path/to/.aiida_run/ab/cd/1234-...',
    # so the working directory sits 3 path components below the root.
    depth = len(base.parts) + 3
    return {Path(*f.parts[:depth]) for f in referenced_remote_files(base)}
def existing_work_directories(root: str | Path) -> set[Path]:
"""Return all calcjob working directories under 'root' that exist on disk.
Notes
-----
As Paths are returned without any information about what computer
the path refers to, this function is only useful in environments
where the Paths are globally unique.
Examples
--------
>>> work_directories("/path/to/my-user/.aiida_run")
{PosixPath('/path/to/my-user/.aiida_run/00/24/ab.c2-899c-4106-8c8e-74638dbdd71c')}
"""
root = Path(root).absolute()
# aiiDA shards working directory paths like '/path/to/.aiida_run/ab/cd/1234-...'
# so we add glob 3 subdirectories onto the root to get to the working directories.
return {Path(p) for p in root.glob("*/*/*")}
def unreferenced_work_directories(root: str | Path) -> set[Path]:
    """Return all unreferenced calcjob working directories under 'root'.

    i.e. return all calcjob working directories that exist on disk, but are
    not referenced in the AiiDA database.

    Notes
    -----
    As Paths are returned without any information about what computer
    the path refers to, this function is only useful in environments
    where the Paths are globally unique.

    Examples
    --------
    >>> unreferenced_work_directories("/path/to/my-user/.aiida_run")
    {PosixPath('/path/to/my-user/.aiida_run/00/24/abc2-899c-4106-8c8e-74638dbdd71c')}
    """
    base = Path(root).absolute()
    on_disk = existing_work_directories(base)
    in_database = referenced_work_directories(base)
    return on_disk - in_database
def computer_work_directory(computer: str | aiida.orm.Computer) -> Path:
    """Return the work directory for 'computer'.

    Like 'computer.get_workdir()', except that '{username}' template
    parameters are replaced with actual usernames.

    Parameters
    ----------
    computer
        A Computer instance, or a computer label.
    """
    if not isinstance(computer, aiida.orm.Computer):
        computer = aiida.orm.load_computer(computer)
    with computer.get_transport() as transport:
        workdir_template = computer.get_workdir()
        return Path(workdir_template.format(username=transport.whoami()))
|
manager.py | #!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
    """Fork into a pty and relay the child's output through non-blocking stdout.

    The parent forwards SIGINT/SIGTERM to the child (which lives in its own
    process group), pumps the pty until EOF/EIO, then exits with the child's
    wait status. The caller continues as the child.
    """
    # get a non-blocking stdout
    child_pid, child_pty = os.forkpty()
    if child_pid != 0:  # parent
        # child is in its own process group, manually pass kill signals
        signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
        signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

        fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
                    fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

        while True:
            try:
                dat = os.read(child_pty, 4096)
            except OSError as e:
                # EIO from a pty means the child side closed: we're done.
                if e.errno == errno.EIO:
                    break
                continue

            if not dat:
                break

            try:
                # dat is a byte string; fine for sys.stdout on Python 2.7
                # (this file's shebang targets python2.7).
                sys.stdout.write(dat)
            except (OSError, IOError):
                # stdout is non-blocking; dropped writes are acceptable here.
                pass

        os._exit(os.wait()[1])
if __name__ == "__main__":
neos_update_required = os.path.isfile("/init.qcom.rc") \
and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 8)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"ubloxd": "selfdrive.locationd.ubloxd",
"mapd": "selfdrive.mapd.mapd",
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./start.sh"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"visiond": ("selfdrive/visiond", ["./visiond"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
    # Accessor for the module-level table of currently running managed processes.
    return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'gpsd',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'visiond',
'proclogd',
'ubloxd',
'mapd',
]
def register_managed_process(name, desc, car_started=False):
    """Register an extra managed process at runtime.

    `desc` follows the managed_processes convention (module path or
    (dir, argv) tuple); `car_started` picks which start list it joins.
    """
    global managed_processes, car_started_processes, persistent_processes
    print("registering %s" % name)
    managed_processes[name] = desc
    # route it into the appropriate lifecycle list
    target = car_started_processes if car_started else persistent_processes
    target.append(name)
# ****************** process management functions ******************
def launcher(proc, gctx):
    """Child entry point for python processes: import `proc`, run main(gctx)."""
    try:
        module = importlib.import_module(proc)  # import the process module
        setproctitle(proc)                      # rename the child for ps/top
        module.main(gctx)                       # hand over control
    except KeyboardInterrupt:
        cloudlog.warning("child %s got SIGINT" % proc)
    except Exception:
        # sys.excepthook doesn't play nice with threads, so the crash
        # handler is invoked here instead of being installed globally
        crash.capture_exception()
        raise
def nativelauncher(pargs, cwd):
    """Exec a native managed process from `cwd`; never returns on success."""
    os.chdir(cwd)
    # pex zip extraction can drop the execute bit, so restore it before exec
    os.chmod(pargs[0], 0o700)
    os.execvp(pargs[0], pargs)
def start_managed_process(name):
    """Start managed process `name` unless it is unknown or already running."""
    if name in running or name not in managed_processes:
        return
    proc = managed_processes[name]
    if isinstance(proc, str):
        # python process: fork and run its main() via launcher
        cloudlog.info("starting python %s" % proc)
        child = Process(name=name, target=launcher, args=(proc, gctx))
    else:
        # native process: exec the binary from its build directory
        pdir, pargs = proc
        cloudlog.info("starting process %s" % name)
        child = Process(name=name, target=nativelauncher,
                        args=(pargs, os.path.join(BASEDIR, pdir)))
    running[name] = child
    child.start()
def prepare_managed_process(p):
    """Pre-import (python) or build (native) managed process `p`."""
    proc = managed_processes[p]
    if isinstance(proc, str):
        # warm the import so the later fork starts fast
        cloudlog.info("preimporting %s" % proc)
        importlib.import_module(proc)
        return
    # native process: build it, retrying once after `make clean` on failure
    cloudlog.info("building %s" % (proc,))
    build_dir = os.path.join(BASEDIR, proc[0])
    try:
        subprocess.check_call(["make", "-j4"], cwd=build_dir)
    except subprocess.CalledProcessError:
        cloudlog.warning("building %s failed, make clean" % (proc, ))
        subprocess.check_call(["make", "clean"], cwd=build_dir)
        subprocess.check_call(["make", "-j4"], cwd=build_dir)
def kill_managed_process(name):
    """Stop managed process `name`, escalating SIGINT/SIGTERM -> SIGKILL,
    and reboot the phone if an unkillable process refuses to die."""
    if name not in running or name not in managed_processes:
        return
    cloudlog.info("killing %s" % name)
    if running[name].exitcode is None:
        # still alive: send the polite signal first
        if name in interrupt_processes:
            os.kill(running[name].pid, signal.SIGINT)
        else:
            running[name].terminate()
        # give it 5 seconds to die
        running[name].join(5.0)
        if running[name].exitcode is None:
            if name in unkillable_processes:
                # SIGKILL would risk kernel page-table corruption (see list above),
                # so wait longer and reboot the whole phone as a last resort
                cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
                running[name].join(15.0)
                if running[name].exitcode is None:
                    cloudlog.critical("FORCE REBOOTING PHONE!")
                    os.system("date >> /sdcard/unkillable_reboot")
                    os.system("reboot")
                    raise RuntimeError
            else:
                cloudlog.info("killing %s with SIGKILL" % name)
                os.kill(running[name].pid, signal.SIGKILL)
                running[name].join()
    cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
    del running[name]
def pm_apply_packages(cmd):
    """Run `pm <cmd>` (e.g. 'enable'/'disable') on each comma android package."""
    for pkg in android_packages:
        system("pm %s %s" % (cmd, pkg))
def cleanup_all_processes(signal, frame):
    """Teardown (also usable as a signal handler): disable the android
    packages and kill every managed process."""
    cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
    pm_apply_packages('disable')
    # snapshot the keys: kill_managed_process mutates `running` as it goes
    for proc_name in list(running):
        kill_managed_process(proc_name)
    cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
    """One-time setup: register with the server (or fake a dongle id),
    configure logging/crash reporting, and create the log directory."""
    global gctx
    if should_register:
        reg_res = register()
        if not reg_res:
            raise Exception("server registration failed")
        dongle_id, dongle_secret = reg_res
    else:
        dongle_id = "c"*16
    # set dongle id
    cloudlog.info("dongle id is " + dongle_id)
    os.environ['DONGLE_ID'] = dongle_id
    cloudlog.info("dirty is %d" % dirty)
    if not dirty:
        os.environ['CLEAN'] = '1'
    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
    crash.bind_user(id=dongle_id)
    crash.bind_extra(version=version, dirty=dirty, is_eon=True)
    os.umask(0)
    # make sure the log root exists and is world-writable
    try:
        os.mkdir(ROOT, 0o777)
    except OSError:
        pass
    # set gctx
    gctx = {}
def system(cmd):
    """Run a shell command, logging a failure event instead of raising."""
    try:
        cloudlog.info("running %s" % cmd)
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        # keep only the tail of the output so the log event stays small
        cloudlog.event("running failed", cmd=e.cmd,
                       output=e.output[-1024:], returncode=e.returncode)
def manager_thread():
    """Main supervision loop: start persistent processes, then start/stop
    car_started_processes based on messages from the thermal service."""
    # now loop
    context = zmq.Context()
    thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})
    # save boot log
    subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
    for p in persistent_processes:
        start_managed_process(p)
    # start frame
    pm_apply_packages('enable')
    system("am start -n ai.comma.plus.frame/.MainActivity")
    if os.getenv("NOBOARD") is None:
        start_managed_process("pandad")
    params = Params()
    logger_dead = False
    while 1:
        # get health of board, log this in "thermal"
        msg = messaging.recv_sock(thermal_sock, wait=True)
        # uploader is gated based on the phone temperature
        if msg.thermal.thermalStatus >= ThermalStatus.yellow:
            kill_managed_process("uploader")
        else:
            start_managed_process("uploader")
        # stop logging when nearly out of disk; NOTE(review): the flag is only
        # cleared in the not-started branch below
        if msg.thermal.freeSpace < 0.05:
            logger_dead = True
        if msg.thermal.started:
            for p in car_started_processes:
                if p == "loggerd" and logger_dead:
                    kill_managed_process(p)
                else:
                    start_managed_process(p)
        else:
            logger_dead = False
            for p in car_started_processes:
                kill_managed_process(p)
        # check the status of all processes, did any of them die?
        for p in running:
            cloudlog.debug(" running %s %s" % (p, running[p]))
        # is this still needed?
        if params.get("DoUninstall") == "1":
            break
def get_installed_apks():
    """Return {package_name: apk_path} for every installed android package."""
    out = subprocess.check_output(["pm", "list", "packages", "-f"]).strip()
    apks = {}
    # each interesting line looks like "package:<path>=<name>"
    for line in out.split("\n"):
        if not line.startswith("package:"):
            continue
        path, pkg = line.split("package:")[1].split("=")
        apks[pkg] = path
    return apks
def install_apk(path):
    """Install an apk (staging it on world-readable storage); return success."""
    # can only install from world readable path
    staged = "/sdcard/%s" % os.path.basename(path)
    shutil.copyfile(path, staged)
    ret = subprocess.call(["pm", "install", "-r", staged])
    os.remove(staged)
    return ret == 0
def update_apks():
    """Install or update the bundled android apks.

    Compares the sha1 of each apk shipped in BASEDIR/apk against the
    installed copy and (re)installs when they differ, falling back to
    uninstall + install if the straight install fails.
    """
    # install apks
    installed = get_installed_apks()
    install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
    for apk in install_apks:
        app = os.path.basename(apk)[:-4]
        if app not in installed:
            installed[app] = None
    cloudlog.info("installed apks %s" % (str(installed), ))
    # plain dict iteration works on both py2 and py3 (iterkeys() was py2-only)
    for app in installed:
        apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
        if not os.path.exists(apk_path):
            continue
        # hash in binary mode and close the handles (previously both the
        # text-mode open() handles were leaked)
        with open(apk_path, 'rb') as f:
            h1 = hashlib.sha1(f.read()).hexdigest()
        h2 = None
        if installed[app] is not None:
            with open(installed[app], 'rb') as f:
                h2 = hashlib.sha1(f.read()).hexdigest()
        cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
        if h2 is None or h1 != h2:
            cloudlog.info("installing %s" % app)
            success = install_apk(apk_path)
            if not success:
                cloudlog.info("needing to uninstall %s" % app)
                system("pm uninstall %s" % app)
                success = install_apk(apk_path)
            assert success
def manager_update():
    """Install the optional vpn payload, then update the bundled apks."""
    vpn_dir = os.path.join(BASEDIR, "vpn")
    if os.path.exists(vpn_dir):
        cloudlog.info("installing vpn")
        os.system(os.path.join(vpn_dir, "install.sh"))
    update_apks()
def manager_prepare():
    """Build cereal, then pre-import/build every managed process."""
    # cereal is a build dependency of everything else, so it goes first
    subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
    # run from the manager's own directory so relative paths resolve
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    for name in managed_processes:
        prepare_managed_process(name)
def uninstall():
    """Arm a factory-reset wipe for recovery and reboot into it."""
    cloudlog.warning("uninstalling")
    with open('/cache/recovery/command', 'w') as f:
        f.write('--wipe_data\n')
    # IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
    os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
    """Entry point: tune android settings, prune the process table from env
    vars, seed default params, build everything, then run the manager loop."""
    # the flippening!
    os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
    # environment variables prune the process table for debug / lean setups
    if os.getenv("NOLOG") is not None:
        del managed_processes['loggerd']
        del managed_processes['tombstoned']
    if os.getenv("NOUPLOAD") is not None:
        del managed_processes['uploader']
    if os.getenv("NOVISION") is not None:
        del managed_processes['visiond']
    if os.getenv("LEAN") is not None:
        del managed_processes['uploader']
        del managed_processes['loggerd']
        del managed_processes['logmessaged']
        del managed_processes['logcatd']
        del managed_processes['tombstoned']
        del managed_processes['proclogd']
    if os.getenv("NOCONTROL") is not None:
        del managed_processes['controlsd']
        del managed_processes['plannerd']
        del managed_processes['radard']
    # support additional internal only extensions
    try:
        import selfdrive.manager_extensions
        selfdrive.manager_extensions.register(register_managed_process)
    except ImportError:
        pass
    params = Params()
    params.manager_start()
    # set unset params (first-boot defaults)
    if params.get("IsMetric") is None:
        params.put("IsMetric", "0")
    if params.get("RecordFront") is None:
        params.put("RecordFront", "0")
    if params.get("IsFcwEnabled") is None:
        params.put("IsFcwEnabled", "1")
    if params.get("HasAcceptedTerms") is None:
        params.put("HasAcceptedTerms", "0")
    if params.get("IsUploadVideoOverCellularEnabled") is None:
        params.put("IsUploadVideoOverCellularEnabled", "1")
    if params.get("IsDriverMonitoringEnabled") is None:
        params.put("IsDriverMonitoringEnabled", "1")
    if params.get("IsGeofenceEnabled") is None:
        params.put("IsGeofenceEnabled", "-1")
    if params.get("SpeedLimitOffset") is None:
        params.put("SpeedLimitOffset", "0")
    if params.get("LongitudinalControl") is None:
        params.put("LongitudinalControl", "0")
    if params.get("LimitSetSpeed") is None:
        params.put("LimitSetSpeed", "0")
    # is this chffrplus?
    if os.getenv("PASSIVE") is not None:
        params.put("Passive", str(int(os.getenv("PASSIVE"))))
    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")
    # put something on screen while we set things up
    if os.getenv("PREPAREONLY") is not None:
        spinner_proc = None
    else:
        spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
        spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
                                        cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
                                        close_fds=True)
    try:
        manager_update()
        manager_init()
        manager_prepare()
    finally:
        if spinner_proc:
            spinner_proc.terminate()
    if os.getenv("PREPAREONLY") is not None:
        return
    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        crash.capture_exception()
    finally:
        # always tear children down, even after a crash
        cleanup_all_processes(None, None)
    if params.get("DoUninstall") == "1":
        uninstall()
if __name__ == "__main__":
    main()
    # manual exit because we are forked
    sys.exit(0)
|
mapviewer.py | #!/usr/bin/env python
"""
GUI for displaying maps from HDF5 files
"""
import os
import platform
import sys
import time
import json
import socket
import datetime
from functools import partial
from threading import Thread
from collections import OrderedDict, namedtuple
import wx
from wx.adv import AboutBox, AboutDialogInfo
import wx.lib.scrolledpanel as scrolled
import wx.lib.mixins.inspection
import wx.dataview as dv
DVSTY = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES
HAS_EPICS = False
try:
from epics import caput
HAS_EPICS = True
except:
pass
import numpy as np
import scipy.stats as stats
#from matplotlib.widgets import Slider, Button, RadioButtons
from wxmplot import PlotFrame
import larch
from larch.larchlib import read_workdir, save_workdir
from larch.wxlib import (LarchPanel, LarchFrame, EditableListBox, SimpleText,
FloatCtrl, Font, pack, Popup, Button, MenuItem,
Choice, Check, GridPanel, FileSave, HLine, flatnotebook,
HLine, OkCancel, LEFT, LarchUpdaterDialog, LarchWxApp)
from larch.utils.strutils import bytes2str, version_ge
from larch.io import nativepath
from larch.site_config import icondir
from ..xrd import lambda_from_E, xrd1d, save1D, calculate_xvalues
from ..xrmmap import GSEXRM_MapFile, GSEXRM_FileStatus, h5str, ensure_subgroup, DEFAULT_XRAY_ENERGY
from ..apps import check_larchversion, update_larch
from ..epics import pv_fullname
from ..wxlib.xrfdisplay import XRFDisplayFrame
from .mapimageframe import MapImageFrame, CorrelatedMapFrame
from .mapmathpanel import MapMathPanel
from .maptomopanel import TomographyPanel
from .mapxrfpanel import XRFAnalysisPanel
from .maptomopanel import TomographyPanel
from ..wxxrd import XRD1DViewerFrame, XRD2DViewerFrame
def timestring():
    """Return the current UTC time as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    now = datetime.datetime.utcnow()
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')
# platform-dependent base font size for the GUI
FONTSIZE = 8
if platform.system() in ('Windows', 'Darwin'):
    FONTSIZE = 10
# wx alignment shorthands (note: LEFT deliberately shadows the larch.wxlib import)
CEN = wx.ALIGN_CENTER
LEFT = wx.ALIGN_LEFT
RIGHT = wx.ALIGN_RIGHT
ALL_CEN = wx.ALL|CEN
ALL_LEFT = wx.ALL|LEFT
ALL_RIGHT = wx.ALL|RIGHT
FILE_WILDCARDS = 'X-ray Maps (*.h5)|*.h5|All files (*.*)|*.*'
XRF_ICON_FILE = 'gse_xrfmap.ico'
# user-facing dialog message templates ('%s' is filled with the file/folder name)
NOT_OWNER_MSG = """The File
'%s'
appears to be open by another process. Having two
processes writing to the file can cause corruption.
Do you want to take ownership of the file?
"""
NOT_GSEXRM_FILE = """The File
'%s'
doesn't seem to be a Map File
"""
NOT_GSEXRM_FOLDER = """The Folder
'%s'
doesn't seem to be a Map Folder
"""
FILE_ALREADY_READ = """The File
'%s'
has already been read.
"""
FRAMESTYLE = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL
BEAMLINE = '13-ID-E'
FACILITY = 'APS'
PLOT_TYPES = ('Single ROI Map', 'Three ROI Map', 'Correlation Plot')
PROCROWS_CHOICES = ('All', '500', '200', '100', '50', '20', '10')
PLOT_OPERS = ('/', '*', '-', '+')
# optional scan-database support, enabled only when credentials are present
ESCAN_CRED = os.environ.get('ESCAN_CREDENTIALS', None)
if ESCAN_CRED is not None:
    try:
        from ..epics.larchscan import connect_scandb
    except ImportError:
        ESCAN_CRED = None
# standard control width and overall panel width used by the panels below
CWID = 150
WWID = 100 + CWID*4
class MapPanel(GridPanel):
    '''Panel of Controls for viewing maps'''
    label = 'ROI Map'

    def __init__(self, parent, owner=None, **kws):
        # owner is the main viewer frame; it holds open files and image displays
        self.owner = owner
        self.cfile, self.xrmmap = None,None
        self.last_process_time = 0
        self.detectors_set = False
        GridPanel.__init__(self, parent, nrows=8, ncols=6, **kws)
        self.plot_choice = Choice(self, choices=PLOT_TYPES, size=(CWID, -1))
        self.plot_choice.Bind(wx.EVT_CHOICE, self.plotSELECT)
        # four detector/ROI selector pairs: [0..2] intensities, [3] normalization
        self.det_choice = [Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1))]
        self.roi_choice = [Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1)),
                           Choice(self, size=(CWID, -1))]
        for i,det_chc in enumerate(self.det_choice):
            det_chc.Bind(wx.EVT_CHOICE, partial(self.detSELECT,i))
        for i,roi_chc in enumerate(self.roi_choice):
            roi_chc.Bind(wx.EVT_CHOICE, partial(self.roiSELECT,i))
        self.det_label = [SimpleText(self,'Intensity'),
                          SimpleText(self,''),
                          SimpleText(self,''),
                          SimpleText(self, 'Normalization')]
        self.roi_label = [SimpleText(self,''),
                          SimpleText(self,''),
                          SimpleText(self,''),
                          SimpleText(self,'')]
        # pixel-range limit controls (disabled until 'limit range' is checked)
        fopts = dict(minval=-50000, precision=0, size=(70, -1))
        self.lims = [FloatCtrl(self, value= 0, **fopts),
                     FloatCtrl(self, value=-1, **fopts),
                     FloatCtrl(self, value= 0, **fopts),
                     FloatCtrl(self, value=-1, **fopts)]
        self.zigoff = FloatCtrl(self, value=0, minval=-15, maxval=15,
                                precision=0, size=(70, -1))
        for wid in self.lims:
            wid.Disable()
        self.use_dtcorr = Check(self, default=True,
                                label='Correct for Detector Deadtime',
                                action=self.onDTCorrect)
        self.use_hotcols = Check(self, default=False,
                                 label='Remove First and Last columns',
                                 action=self.onHotCols)
        self.use_zigzag = Check(self, default=False, label='Fix ZigZag',
                                action=self.onZigZag)
        self.limrange = Check(self, default=False,
                              label=' Limit Map Range to Pixel Range:',
                              action=self.onLimitRange)
        map_shownew = Button(self, 'Show New Map', size=(CWID, -1),
                             action=partial(self.onROIMap, new=True))
        map_update = Button(self, 'Replace Last Map', size=(CWID, -1),
                            action=partial(self.onROIMap, new=False))
        self.mapproc_btn = Button(self, 'Add More Rows', size=(CWID, -1),
                                  action=self.onProcessMap)
        self.mapproc_nrows = Choice(self, choices=PROCROWS_CHOICES, size=(CWID, -1))
        self.mapproc_nrows.SetStringSelection('100')
        # layout: map-processing row, plot-type + detector/ROI grid, then options
        self.Add(SimpleText(self, 'Build Map From Raw Data Folder:'),
                 dcol=2, style=LEFT, newrow=True)
        self.Add(self.mapproc_btn, dcol=1, style=LEFT)
        self.Add(SimpleText(self, 'Max # Rows to Add:'), dcol=1,
                 style=LEFT, newrow=False)
        self.Add(self.mapproc_nrows, dcol=1, style=LEFT)
        self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
        self.Add((5, 5), newrow=True)
        self.Add(SimpleText(self, 'Display ROI Maps: Plot Type:'), dcol=2,
                 style=LEFT, newrow=True)
        self.Add(self.plot_choice, dcol=1, style=LEFT)
        self.AddMany((SimpleText(self,''), self.det_label[0],
                      self.det_label[1], self.det_label[2], self.det_label[3]),
                     style=LEFT, newrow=True)
        self.AddMany((SimpleText(self,'Detector:'), self.det_choice[0],
                      self.det_choice[1], self.det_choice[2], self.det_choice[3]),
                     style=LEFT, newrow=True)
        self.AddMany((SimpleText(self,'ROI:'),self.roi_choice[0],
                      self.roi_choice[1],self.roi_choice[2], self.roi_choice[3]),
                     style=LEFT, newrow=True)
        self.AddMany((SimpleText(self,''),self.roi_label[0],
                      self.roi_label[1],self.roi_label[2], self.roi_label[3]),
                     style=LEFT, newrow=True)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(map_shownew, dcol=1, style=LEFT)
        self.Add(map_update, dcol=1, style=LEFT)
        self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
        self.Add(SimpleText(self,'Options:'), dcol=1, style=LEFT, newrow=True)
        self.Add(self.use_dtcorr, dcol=2, style=LEFT)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(self.use_hotcols, dcol=2, style=LEFT)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(self.use_zigzag, dcol=1, style=LEFT)
        self.Add(self.zigoff, dcol=1, style=LEFT)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(self.limrange, dcol=2, style=LEFT)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(SimpleText(self, 'X Range:'), dcol=1, style=LEFT)
        self.Add(self.lims[0], dcol=1, style=LEFT)
        self.Add(self.lims[1], dcol=1, style=LEFT)
        self.Add((5, 5), dcol=1, style=LEFT, newrow=True)
        self.Add(SimpleText(self, 'Y Range:'), dcol=1, style=LEFT)
        self.Add(self.lims[2], dcol=1, style=LEFT)
        self.Add(self.lims[3], dcol=1, style=LEFT)
        self.Add(HLine(self, size=(WWID, 5)), dcol=8, style=LEFT, newrow=True)
        self.pack()

    def onDTCorrect(self, event=None):
        """Push the deadtime-correction checkbox state onto the current file."""
        xrmfile = self.owner.current_file
        if xrmfile is not None:
            xrmfile.dtcorrect = self.use_dtcorr.IsChecked()

    def onHotCols(self, event=None):
        """Push the 'remove first/last columns' setting onto the current file."""
        xrmfile = self.owner.current_file
        if xrmfile is not None:
            xrmfile.hotcols = self.use_hotcols.IsChecked()

    def onZigZag(self, event=None):
        """Apply the zigzag row offset to the current file (0 when disabled)."""
        xrmfile = self.owner.current_file
        if xrmfile is not None:
            zigzag = 0
            if self.use_zigzag.IsChecked():
                zigzag = int(self.zigoff.GetValue())
            xrmfile.zigzag = zigzag

    def update_xrmmap(self, xrmfile=None, set_detectors=False):
        """Point the panel at `xrmfile` and (re)populate the selectors."""
        if xrmfile is None:
            xrmfile = self.owner.current_file
        self.cfile = xrmfile
        self.xrmmap = self.cfile.xrmmap
        if set_detectors or (not self.detectors_set):
            self.set_det_choices()
        self.plotSELECT()

    def onLimitRange(self, event=None):
        """Enable/disable the pixel-range limit controls with the checkbox."""
        if self.limrange.IsChecked():
            for wid in self.lims:
                wid.Enable()
        else:
            for wid in self.lims:
                wid.Disable()

    def detSELECT(self,idet,event=None):
        """A detector chooser changed: refresh that column's ROI list."""
        self.set_roi_choices(idet=idet)

    def roiSELECT(self,iroi,event=None):
        """An ROI chooser changed: update its limits label from the h5 file."""
        detname = self.det_choice[iroi].GetStringSelection()
        roiname = self.roi_choice[iroi].GetStringSelection()
        # map-file schema changed at version 2.0.0; handle both layouts
        if version_ge(self.cfile.version, '2.0.0'):
            try:
                roi = self.cfile.xrmmap['roimap'][detname][roiname]
                limits = roi['limits'][:]
                units = bytes2str(roi['limits'].attrs.get('units',''))
                roistr = '[%0.1f to %0.1f %s]' % (limits[0],limits[1],units)
            except:
                roistr = ''
        else:
            try:
                roi = self.cfile.xrmmap[detname]
                en = list(roi['energy'][:])
                index = list(roi['roi_name'][:]).index(roiname)
                limits = list(roi['roi_limits'][:][index])
                roistr = '[%0.1f to %0.1f keV]' % (en[limits[0]],en[limits[1]])
            except:
                roistr = ''
        self.roi_label[iroi].SetLabel(roistr)

    def plotSELECT(self,event=None):
        """Enable/disable the selector columns to match the chosen plot type."""
        if len(self.owner.filemap) > 0:
            plot_type = self.plot_choice.GetStringSelection().lower()
            if 'single' in plot_type:
                for i in (1, 2):
                    self.det_choice[i].Disable()
                    self.roi_choice[i].Disable()
                    self.roi_label[i].SetLabel('')
                for i, label in enumerate(['Intensity', ' ', ' ']):
                    self.det_label[i].SetLabel(label)
            elif 'three' in plot_type:
                for i in (1, 2):
                    self.det_choice[i].Enable()
                    self.roi_choice[i].Enable()
                for i, label in enumerate(['Red', 'Green', 'Blue']):
                    self.det_label[i].SetLabel(label)
                self.set_roi_choices()
            elif 'correl' in plot_type:
                self.det_choice[1].Enable()
                self.roi_choice[1].Enable()
                self.det_choice[2].Disable()
                self.roi_choice[2].Disable()
                for i, label in enumerate([' X ',' Y ', '']):
                    self.det_label[i].SetLabel(label)
                self.set_roi_choices()

    def onClose(self):
        """Destroy any plot frames this panel opened.

        NOTE(review): self.plotframes is not set in __init__ — confirm it is
        created elsewhere before this is called.
        """
        for p in self.plotframes:
            try:
                p.Destroy()
            except:
                pass

    def ShowMap(self, xrmfile=None, new=True):
        """Display the selected ROI map (single or RGB three-ROI), optionally
        normalized by the fourth selector and cropped to the pixel range."""
        subtitles = None
        plt3 = 'three' in self.plot_choice.GetStringSelection().lower()
        if xrmfile is None:
            xrmfile = self.owner.current_file
        self.onZigZag()
        args={'hotcols' : xrmfile.hotcols,
              'dtcorrect' : xrmfile.dtcorrect}
        det_name, roi_name, plt_name = [], [], []
        for det, roi in zip(self.det_choice, self.roi_choice):
            det_name += [det.GetStringSelection()]
            roi_name += [roi.GetStringSelection()]
            if det_name[-1] == 'scalars':
                plt_name += ['%s' % roi_name[-1]]
            else:
                plt_name += ['%s(%s)' % (roi_name[-1],det_name[-1])]
        # normalization map; ROI '1' means "no normalization"
        mapx = 1.0
        if roi_name[-1] != '1':
            mapx = xrmfile.get_roimap(roi_name[-1], det=det_name[-1], **args)
            # avoid division by zero in the normalization
            mapx[np.where(mapx==0)] = 1.
        r_map = xrmfile.get_roimap(roi_name[0], det=det_name[0], **args)
        if plt3:
            g_map = xrmfile.get_roimap(roi_name[1], det=det_name[1], **args)
            b_map = xrmfile.get_roimap(roi_name[2], det=det_name[2], **args)
        x = xrmfile.get_pos(0, mean=True)
        y = xrmfile.get_pos(1, mean=True)
        pref, fname = os.path.split(xrmfile.filename)
        if plt3:
            # stack R,G,B and reorder to (ny, nx, 3) for image display
            map = np.array([r_map/mapx, g_map/mapx, b_map/mapx])
            map = np.einsum('kij->ijk', map)
            title = fname
            info = ''
            if roi_name[-1] == '1':
                subtitles = {'red': 'Red: %s' % plt_name[0],
                             'green': 'Green: %s' % plt_name[1],
                             'blue': 'Blue: %s' % plt_name[2]}
            else:
                subtitles = {'red': 'Red: %s / %s' % (plt_name[0], plt_name[-1]),
                             'green': 'Green: %s / %s' % (plt_name[1], plt_name[-1]),
                             'blue': 'Blue: %s / %s' % (plt_name[2], plt_name[-1])}
        else:
            map = r_map/mapx
            if roi_name[-1] == '1':
                title = plt_name[0]
            else:
                title = '%s / %s' % (plt_name[0], plt_name[-1])
            title = '%s: %s' % (fname, title)
            info = 'Intensity: [%g, %g]' %(map.min(), map.max())
            subtitle = None
        # pick a detector number out of the detector name when unambiguous
        det = None
        if (plt3 and det_name[0]==det_name[1] and det_name[0]==det_name[2]) or not plt3:
            for s in det_name[0]:
                if s.isdigit(): det = int(s)
        if len(self.owner.im_displays) == 0 or new:
            iframe = self.owner.add_imdisplay(title, det=det)
        xoff, yoff = 0, 0
        if self.limrange.IsChecked():
            lims = [wid.GetValue() for wid in self.lims]
            map = map[lims[2]:lims[3], lims[0]:lims[1]]
            xoff, yoff = lims[0], lims[2]
        self.owner.display_map(map, title=title, info=info, x=x, y=y, det=det,
                               xoff=xoff, yoff=yoff, subtitles=subtitles,
                               xrmfile=self.cfile)

    def onLasso(self, selected=None, mask=None, data=None, xrmfile=None, **kws):
        """Convert flat lasso-selected pixel indices to (ix, iy) pairs.

        NOTE(review): `indices` is built but not used or returned here —
        confirm whether downstream code was meant to consume it.
        """
        if xrmfile is None:
            xrmfile = self.owner.current_file
        ny, nx = xrmfile.get_shape()
        indices = []
        for idx in selected:
            iy, ix = divmod(idx, ny)
            indices.append((ix, iy))

    def ShowCorrel(self, xrmfile=None, new=True):
        """Show a correlation plot of the X (column 0) vs Y (column 1) ROI maps."""
        if xrmfile is None:
            xrmfile = self.owner.current_file
        self.onZigZag()
        args={'hotcols' : xrmfile.hotcols,
              'dtcorrect' : xrmfile.dtcorrect}
        # NOTE(review): these three lists are never used below
        det_name,roi_name = [],[]
        plt_name = []
        xdet = self.det_choice[0].GetStringSelection()
        xroi = self.roi_choice[0].GetStringSelection()
        xlab = "%s(%s)" % (xroi, xdet)
        if 'scalar' in xdet.lower():
            xlab = xroi
        ydet = self.det_choice[1].GetStringSelection()
        yroi = self.roi_choice[1].GetStringSelection()
        ylab = "%s(%s)" % (yroi, ydet)
        if 'scalar' in ydet.lower():
            ylab = yroi
        map1 = xrmfile.get_roimap(xroi, det=xdet, **args)
        map2 = xrmfile.get_roimap(yroi, det=ydet, **args)
        x = xrmfile.get_pos(0, mean=True)
        y = xrmfile.get_pos(1, mean=True)
        pref, fname = os.path.split(xrmfile.filename)
        title ='%s: %s vs. %s' %(fname, ylab, xlab)
        correl_plot = CorrelatedMapFrame(parent=self.owner, xrmfile=xrmfile)
        correl_plot.display(map1, map2, name1=xlab, name2=ylab,
                            x=x, y=y, title=title)
        correl_plot.Show()
        correl_plot.Raise()
        self.owner.plot_displays.append(correl_plot)

    def onProcessMap(self, event=None, max_new_rows=None):
        """Process more raw-data rows into the h5 file, then refresh the panel."""
        xrmfile = self.owner.current_file
        if xrmfile is None:
            return
        pref, fname = os.path.split(xrmfile.filename)
        if max_new_rows is None:
            max_new_rows = self.mapproc_nrows.GetStringSelection().lower()
            if max_new_rows.lower() == 'all':
                max_new_rows = None
            else:
                max_new_rows = int(max_new_rows)
        self.owner.process_file(fname, max_new_rows=max_new_rows)
        self.update_xrmmap(xrmfile=self.owner.current_file, set_detectors=True)

    def onROIMap(self, event=None, new=True):
        """Dispatch to ShowMap or ShowCorrel based on the selected plot type."""
        plotcmd = partial(self.ShowMap, new=new)
        if 'correlation' in self.plot_choice.GetStringSelection().lower():
            plotcmd = partial(self.ShowCorrel, new=new)
        plotcmd()

    def set_det_choices(self):
        """Fill all four detector choosers from the current file's detectors."""
        det_list = self.cfile.get_detector_list()
        for det_ch in self.det_choice:
            det_ch.SetChoices(det_list)
        if 'scalars' in det_list: ## should set 'denominator' to scalars as default
            self.det_choice[-1].SetStringSelection('scalars')
        self.set_roi_choices()
        self.detectors_set = True

    def set_roi_choices(self, idet=None):
        """Refresh the ROI chooser(s) — all columns, or just column `idet` —
        keeping the current selection when it is still valid."""
        force_rois = not self.detectors_set
        if idet is None:
            for idet, det_ch in enumerate(self.det_choice):
                detname = self.det_choice[idet].GetStringSelection()
                rois = self.cfile.get_roi_list(detname, force=force_rois)
                cur = self.roi_choice[idet].GetStringSelection()
                self.roi_choice[idet].SetChoices(rois)
                if cur in rois:
                    self.roi_choice[idet].SetStringSelection(cur)
                self.roiSELECT(idet)
        else:
            detname = self.det_choice[idet].GetStringSelection()
            rois = self.cfile.get_roi_list(detname, force=force_rois)
            cur = self.roi_choice[idet].GetStringSelection()
            self.roi_choice[idet].SetChoices(rois)
            if cur in rois:
                self.roi_choice[idet].SetStringSelection(cur)
            self.roiSELECT(idet)

    def update_roi(self, detname):
        """Return the (possibly refreshed) ROI list for detector `detname`."""
        force = not self.detectors_set
        return self.cfile.get_roi_list(detname, force=force)
class MapInfoPanel(scrolled.ScrolledPanel):
"""Info Panel """
label = 'Map Info'
def __init__(self, parent, owner=None, **kws):
scrolled.ScrolledPanel.__init__(self, parent, -1,
style=wx.GROW|wx.TAB_TRAVERSAL, **kws)
self.owner = owner
sizer = wx.GridBagSizer(3, 3)
self.wids = {}
ir = 0
for label in ('Facility','Run Cycle','Proposal Number','User group',
'H5 Map Created',
'Scan Time','File Compression','Map Data',
'Ring Current', 'X-ray Energy', 'X-ray Intensity (I0)',
'Original data path', 'User Comments 1', 'User Comments 2',
'Scan Fast Motor', 'Scan Slow Motor', 'Dwell Time',
'Sample Fine Stages',
'Sample Stage X', 'Sample Stage Y',
'Sample Stage Z', 'Sample Stage Theta',
'XRD Calibration'):
ir += 1
thislabel = SimpleText(self, '%s:' % label, style=wx.LEFT, size=(125, -1))
self.wids[label] = SimpleText(self, ' ' , style=wx.LEFT, size=(350, -1))
sizer.Add(thislabel, (ir, 0), (1, 1), 1)
sizer.Add(self.wids[label], (ir, 1), (1, 1), 1)
pack(self, sizer)
self.SetupScrolling()
def update_xrmmap(self, xrmfile=None, set_detectors=None):
if xrmfile is None:
xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
def time_between(d1, d2):
d1 = datetime.datetime.strptime(d1, "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(d2, "%Y-%m-%d %H:%M:%S")
diff = d2 - d1 if d2 > d1 else d1 - d2
return diff.days,diff.seconds
config_grp = ensure_subgroup('config',xrmmap)
notes_grp = ensure_subgroup('notes',config_grp)
time_str = bytes2str(notes_grp.attrs.get('h5_create_time',''))
self.wids['H5 Map Created'].SetLabel(time_str)
try:
d,s = time_between(bytes2str(notes_grp.attrs.get('scan_start_time','')),
bytes2str(notes_grp.attrs.get('scan_end_time','')))
time_str = str(datetime.timedelta(days=d,seconds=s))
except:
time_str = bytes2str(xrmmap.attrs.get('Start_Time',''))
self.wids['Scan Time'].SetLabel( time_str )
self.wids['File Compression'].SetLabel(bytes2str(xrmmap.attrs.get('Compression','')))
comments = h5str(xrmmap['config/scan/comments'][()]).split('\n', 2)
for i, comm in enumerate(comments):
self.wids['User Comments %i' %(i+1)].SetLabel(comm)
pos_addrs = [str(x) for x in xrmmap['config/positioners'].keys()]
pos_label = [h5str(x[()]) for x in xrmmap['config/positioners'].values()]
scan_pos1 = h5str(xrmmap['config/scan/pos1'][()])
scan_pos2 = h5str(xrmmap['config/scan/pos2'][()])
i1 = pos_addrs.index(scan_pos1)
i2 = pos_addrs.index(scan_pos2)
start1 = float(xrmmap['config/scan/start1'][()])
start2 = float(xrmmap['config/scan/start2'][()])
stop1 = float(xrmmap['config/scan/stop1'][()])
stop2 = float(xrmmap['config/scan/stop2'][()])
step1 = float(xrmmap['config/scan/step1'][()])
step2 = float(xrmmap['config/scan/step2'][()])
npts1 = int((abs(stop1 - start1) + 1.1*step1)/step1)
npts2 = int((abs(stop2 - start2) + 1.1*step2)/step2)
sfmt = '%s: [%.4f:%.4f], step=%.4f, %i pixels'
scan1 = sfmt % (pos_label[i1], start1, stop1, step1, npts1)
scan2 = sfmt % (pos_label[i2], start2, stop2, step2, npts2)
rowtime = float(xrmmap['config/scan/time1'][()])
self.wids['Scan Fast Motor'].SetLabel(scan1)
self.wids['Scan Slow Motor'].SetLabel(scan2)
pixtime = xrmfile.pixeltime
if pixtime is None:
pixtime = xrmfile.calc_pixeltime()
pixtime =int(round(1000.0*pixtime))
self.wids['Dwell Time'].SetLabel('%.1f ms per pixel' % pixtime)
env_names = list(xrmmap['config/environ/name'])
env_vals = list(xrmmap['config/environ/value'])
env_addrs = list(xrmmap['config/environ/address'])
fines = {'X': '?', 'Y': '?'}
i0vals = {'flux':'?', 'current':'?'}
en = xrmfile.get_incident_energy()
enmsg = '%0.1f eV (%0.3f \u00c5)' % (en, lambda_from_E(en, E_units='eV'))
if abs(en - DEFAULT_XRAY_ENERGY) < 1.0:
enmsg = "%s : PROBABLY NOT CORRECT" % enmsg
self.wids['X-ray Energy'].SetLabel(enmsg)
for name, addr, val in zip(env_names, env_addrs, env_vals):
name = bytes2str(name).lower()
val = h5str(val)
if 'ring_current' in name or 'ring current' in name:
self.wids['Ring Current'].SetLabel('%s mA' % val)
elif 'beamline.fluxestimate' in name or 'transmitted flux' in name:
i0vals['flux'] = val
elif 'i0 current' in name:
i0vals['current'] = val
elif name.startswith('sample'):
name = name.replace('samplestage.', '')
if 'coarsex' in name or 'coarse x' in name:
self.wids['Sample Stage X'].SetLabel('%s mm' % val)
elif 'coarsey' in name or 'coarse y' in name:
self.wids['Sample Stage Y'].SetLabel('%s mm' % val)
elif 'coarsez' in name or 'coarse z' in name:
self.wids['Sample Stage Z'].SetLabel('%s mm' % val)
elif 'theta' in name:
self.wids['Sample Stage Theta'].SetLabel('%s deg' % val)
elif 'finex' in name or 'fine x' in name:
fines['X'] = val
elif 'finey' in name or 'fine y' in name:
fines['Y'] = val
if i0vals['current'] == '?':
i0val = 'Flux=%(flux)s Hz' % i0vals
else:
i0val = u'Flux=%(flux)s Hz, I0 Current=%(current)s \u03BCA' % i0vals
self.wids['X-ray Intensity (I0)'].SetLabel(i0val)
self.wids['Sample Fine Stages'].SetLabel('X, Y = %(X)s, %(Y)s mm' % (fines))
folderpath = bytes2str(xrmmap.attrs.get('Map_Folder',''))
if len(folderpath) > 35:
folderpath = '...'+folderpath[-35:]
self.wids['Original data path'].SetLabel(folderpath)
self.wids['XRD Calibration'].SetLabel('')
xrd_calibration = ''
if 'xrd1d' in xrmmap:
xrd_calibration = bytes2str(xrmmap['xrd1d'].attrs.get('calfile',''))
if not os.path.exists(xrd_calibration):
xrd_calibration = ''
self.wids['XRD Calibration'].SetLabel(os.path.split(xrd_calibration)[-1])
notes = {}
config_grp = ensure_subgroup('config',xrmmap)
notes_grp = ensure_subgroup('notes',config_grp)
for key in notes_grp.attrs.keys():
try:
notes[key] = bytes2str(notes_grp.attrs[key])
except:
pass
note_title = ['Facility','Run Cycle','Proposal Number','User group']
note_str = ['','','','']
if 'beamline' in notes and 'facility' in notes:
note_str[0] = '%s @ %s' % (notes['beamline'],notes['facility'])
if 'run' in notes:
note_str[1] = notes['run']
if 'proposal' in notes:
note_str[2] = notes['proposal']
if 'user' in notes:
note_str[3] = notes['user']
for title,note in zip(note_title,note_str):
self.wids[title].SetLabel(note)
xrmfile.reset_flags()
if xrmfile.has_xrf:
if xrmfile.has_xrd2d and xrmfile.has_xrd1d:
datastr = 'XRF, 2D- and 1D-XRD data'
elif xrmfile.has_xrd2d:
datastr = 'XRF, 2D-XRD data'
elif xrmfile.has_xrd1d:
datastr = 'XRF, 1D-XRD data'
else:
datastr = 'XRF data'
else:
if xrmfile.has_xrd2d and xrmfile.has_xrd1d:
datastr = '2D- and 1D-XRD data'
elif xrmfile.has_xrd2d:
datastr = '2D-XRD data'
elif xrmfile.has_xrd1d:
datastr = '1D-XRD data'
else:
datastr = ''
self.wids['Map Data'].SetLabel(datastr)
    def onClose(self):
        """Panel teardown hook; this panel holds no resources to release."""
        pass
class MapAreaPanel(scrolled.ScrolledPanel):
    """Notebook page for working with saved map areas (pixel masks):
    selecting, renaming, highlighting, deleting, importing/exporting
    areas, and extracting XRF/XRD data with per-ROI statistics."""

    # notebook tab label
    label = 'Map Areas'
    # confirmation text for onDelete ('%s' filled with the area name)
    delstr = """ Delete Area '%s'?
WARNING: This cannot be undone!
"""
    def __init__(self, parent, owner=None, **kws):
        """Build the Map Areas panel: area selector, info/rename controls,
        action buttons, and the per-ROI statistics table."""
        scrolled.ScrolledPanel.__init__(self, parent, -1,
                                        style=wx.GROW|wx.TAB_TRAVERSAL, **kws)
        ######################################
        ## GENERAL MAP AREAS
        self.owner = owner  # owning MapViewerFrame
        pane = wx.Panel(self)
        sizer = wx.GridBagSizer(3, 3)
        self.choices = {}   # area description -> HDF5 area group name
        bsize = (CWID, -1)
        self.choice = Choice(pane, size=(225, -1), action=self.onSelect)
        self.desc = wx.TextCtrl(pane, -1, '', size=(225, -1))
        self.info1 = wx.StaticText(pane, -1, '', size=(275, -1))
        self.info2 = wx.StaticText(pane, -1, '', size=(275, -1))
        self.onmap = Button(pane, 'Show on Map', size=bsize, action=self.onShow)
        self.clear = Button(pane, 'Clear Map', size=bsize, action=self.onClear)
        self.bdelete = Button(pane, 'Delete', size=bsize, action=self.onDelete)
        self.update = Button(pane, 'Apply', size=bsize, action=self.onLabel)
        self.bexport = Button(pane, 'Export Areas', size=bsize, action=self.onExport)
        self.bimport = Button(pane, 'Import Areas', size=bsize, action=self.onImport)
        self.bcopy = Button(pane, 'Copy to Other Maps', size=bsize, action=self.onCopy)
        self.xrf = Button(pane, 'Show XRF (Fore)', size=bsize, action=self.onXRF)
        self.xrf2 = Button(pane, 'Show XRF (Back)', size=bsize,
                           action=partial(self.onXRF, as_mca2=True))
        self.onstats = Button(pane, 'Calculate XRF Stats', size=bsize,
                              action=self.onShowStats)
        self.onreport = Button(pane, 'Save XRF Stats', size=bsize,
                               action=self.onReport)
        self.xrd1d_plot = Button(pane, 'Show 1D XRD', size=bsize,
                                 action=partial(self.onXRD, show=True, xrd1d=True))
        self.xrd2d_plot = Button(pane, 'Show 2D XRD', size=bsize,
                                 action=partial(self.onXRD, show=True, xrd2d=True))
        legend = wx.StaticText(pane, -1, 'Values in Counts per second', size=(200, -1))

        def txt(s):
            # shorthand for a plain label on this pane
            return SimpleText(pane, s)

        irow = 1
        sizer.Add(txt('Map Areas and Saved Points'), ( 0, 0), (1, 5), ALL_CEN, 2)
        sizer.Add(txt('Area: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.choice, (irow, 1), (1, 2), ALL_LEFT, 2)
        sizer.Add(self.bdelete, (irow, 3), (1, 1), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('Info: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.info1, (irow, 1), (1, 2), ALL_LEFT, 2)
        sizer.Add(self.info2, (irow, 3), (1, 2), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('Rename: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.desc, (irow, 1), (1, 2), ALL_LEFT, 2)
        sizer.Add(self.update, (irow, 3), (1, 1), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('Show: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.onmap, (irow, 1), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.clear, (irow, 2), (1, 1), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('Save: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.bexport, (irow, 1), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.bimport, (irow, 2), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.bcopy, (irow, 3), (1, 1), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('XRF: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.xrf, (irow, 1), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.xrf2, (irow, 2), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.onstats, (irow, 3), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.onreport, (irow, 4), (1, 1), ALL_LEFT, 2)
        irow += 1
        sizer.Add(txt('XRD: '), (irow, 0), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.xrd1d_plot, (irow, 1), (1, 1), ALL_LEFT, 2)
        sizer.Add(self.xrd2d_plot, (irow, 2), (1, 1), ALL_LEFT, 2)
        # sizer.Add(self.xrd1d_save, (irow, 0), (1, 2), ALL_LEFT, 2)
        # sizer.Add(self.xrd2d_save, (irow, 2), (1, 2), ALL_LEFT, 2)
        irow += 1
        sizer.Add(legend, (irow, 1), (1, 2), ALL_LEFT, 2)
        pack(pane, sizer)

        # XRD buttons start disabled until a file with XRD data is loaded
        # (see set_enabled_btns)
        for btn in (self.xrd1d_plot, self.xrd2d_plot):
            btn.Disable()

        # main sizer
        msizer = wx.BoxSizer(wx.VERTICAL)
        msizer.Add(pane, 0, wx.ALIGN_LEFT|wx.ALL, 1)
        msizer.Add(wx.StaticLine(self, size=(375, 2), style=wx.LI_HORIZONTAL),
                   0, wx.EXPAND|wx.ALL, 1)

        # statistics table: one row per ROI (see show_stats)
        self.report = None
        rep = self.report = dv.DataViewListCtrl(self, style=DVSTY)
        rep.AppendTextColumn('ROI ', width=100)
        rep.AppendTextColumn('Min', width=75)
        rep.AppendTextColumn('Max', width=75)
        rep.AppendTextColumn('Mean ', width=75)
        rep.AppendTextColumn('Sigma', width=75)
        rep.AppendTextColumn('Median', width=75)
        rep.AppendTextColumn('Mode', width=75)
        # right-align the numeric columns, left-align the ROI name column
        for col in range(7):
            align = wx.ALIGN_RIGHT
            if col == 0: align = wx.ALIGN_LEFT
            rep.Columns[col].Sortable = False
            rep.Columns[col].Renderer.Alignment = align
            rep.Columns[col].Alignment = align
        rep.SetMinSize((590, 300))
        msizer.Add(rep, 1, wx.ALIGN_LEFT|wx.ALL, 1)
        pack(self, msizer)
        self.SetupScrolling()
def onCopy(self, event=None):
xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
print("Copy Area : shape", xrmfile, xrmmap.shape)
    def show_stats(self):
        """Fill the statistics table with per-ROI stats (min/max/mean/
        sigma/median/mode) for the selected area, caching the rows in the
        area's 'roistats' attribute so they are computed only once."""
        # self.stats = self.xrmfile.get_area_stats(self.areaname)
        if self.report is None:
            return
        self.report.DeleteAllItems()
        self.report_data = []

        def report_info(dname,d):
            # append one table row of statistics for data `d` labeled `dname`
            try:
                hmean, gmean = stats.gmean(d), stats.hmean(d)
                skew, kurtosis = stats.skew(d), stats.kurtosis(d)
            except ValueError:
                hmean, gmean, skew, kurtosis = 0, 0, 0, 0
            smode = '--'
            fmt = '{:,.1f}'.format # use thousands commas, 1 decimal place
            mode = stats.mode(d)
            if len(mode) > 0:
                mode = mode[0]
                if len(mode) > 0:
                    smode = fmt(mode[0])
            dat = (dname, fmt(d.min()), fmt(d.max()), fmt(d.mean()),
                   fmt(d.std()), fmt(np.median(d)), smode)
            self.report_data.append(dat)
            self.report.AppendItem(dat)

        areaname = self._getarea()
        xrmfile = self.owner.current_file
        xrmmap = xrmfile.xrmmap
        ctime = xrmfile.pixeltime
        area = xrmfile.get_area(name=areaname)
        amask = area[()]

        def match_mask_shape(det, mask):
            # crop the detector map so it matches the mask's shape, then
            # select the masked pixels (mask may omit hot columns and/or
            # trailing unfinished rows)
            if mask.shape[1] == det.shape[1] - 2: # hotcols
                det = det[:,1:-1]
            if mask.shape[0] < det.shape[0]:
                det = det[:mask.shape[0]]
            return det[mask]

        # use cached stats when available instead of recomputing
        if 'roistats' in area.attrs:
            for dat in json.loads(area.attrs.get('roistats','')):
                dat = tuple(dat)
                self.report_data.append(dat)
                self.report.AppendItem(dat)
            self.choice.Enable()
            return

        version = xrmmap.attrs.get('Version','1.0.0')
        if version_ge(version, '2.0.0'):
            d_pref = 'mca'
            d_scas = [d for d in xrmmap['scalars']]
            detnames = ["%s%i" % (d_pref, i) for i in range(1, xrmfile.nmca+1)]
            d_rois = xrmfile.get_roi_list(detnames[0])
        else:
            d_addrs = [d.lower() for d in xrmmap['roimap/det_address']]
            d_names = [d for d in xrmmap['roimap/det_name']]
            d_pref = 'det'

        # NOTE(review): `rtime` is computed here but never used below --
        # presumably leftover from an earlier real-time normalization;
        # confirm before removing
        for i in range(1, xrmfile.nmca+1):
            tname = '%s%i/realtime' % (d_pref, i)
            rtime = xrmmap[tname][()]
            if amask.shape[1] == rtime.shape[1] - 2: # hotcols
                rtime = rtime[:,1:-1]

        if version_ge(version, '2.0.0'):
            # scalars, then each ROI on each mca detector (counts/second)
            for scalar in d_scas:
                d = xrmmap['scalars'][scalar][()]
                d = match_mask_shape(d, amask)
                report_info(scalar, d/ctime)
            for roi in d_rois:
                for det in detnames:
                    d = xrmfile.get_roimap(roi, det=det, dtcorrect=False)
                    d = match_mask_shape(d, amask)
                    report_info('%s (%s)' % (roi, det), d/ctime)
        else:
            # legacy (1.x) layout: walk roimap/det_raw by detector index
            for idet, dname in enumerate(d_names):
                try:
                    daddr = h5str(d_addrs[idet])
                except IndexError:
                    break
                if 'mca' in daddr:
                    det = 1
                    words = daddr.split('mca')
                    if len(words) > 1:
                        det = int(words[1].split('.')[0])
                d = xrmmap['roimap/det_raw'][:,:,idet]
                d = match_mask_shape(d, amask)
                report_info(dname, d/ctime)

        # cache the freshly computed rows on the area for next time
        if 'roistats' not in area.attrs:
            area.attrs['roistats'] = json.dumps(self.report_data)
            xrmfile.h5root.flush()
def update_xrmmap(self, xrmfile=None, set_detectors=None):
if xrmfile is None: xrmfile = self.owner.current_file
xrmmap = xrmfile.xrmmap
self.set_area_choices(xrmmap, show_last=True)
self.set_enabled_btns(xrmfile=xrmfile)
self.report.DeleteAllItems()
self.report_data = []
try:
self.onSelect()
except:
pass
def set_enabled_btns(self, xrmfile=None):
if xrmfile is None:
xrmfile = self.owner.current_file
xrmfile.reset_flags()
self.xrd2d_plot.Enable(xrmfile.has_xrd1d)
self.xrd1d_plot.Enable(xrmfile.has_xrd1d)
def clear_area_choices(self):
self.info1.SetLabel('')
self.info2.SetLabel('')
self.desc.SetValue('')
self.choice.Clear()
    def set_area_choices(self, xrmmap, show_last=False):
        """Rebuild the area selector from xrmmap['areas'], selecting the
        first area (or the most recently added one when show_last=True)."""
        self.clear_area_choices()
        areas = xrmmap['areas']
        c = self.choice
        c.Clear()
        self.choices = {}
        choice_labels = []
        for a in areas:
            # show the human-readable description; fall back to the group name
            desc = bytes2str(areas[a].attrs.get('description', a))
            self.choices[desc] = a
            choice_labels.append(desc)
        c.AppendItems(choice_labels)
        this_label = ''
        if len(self.choices) > 0:
            idx = 0
            if show_last:
                idx = len(self.choices)-1
            try:
                this_label = choice_labels[idx]
            except:
                return
            c.SetStringSelection(this_label)
            self.desc.SetValue(this_label)
def onReport(self, event=None):
aname = self._getarea()
path, fname = os.path.split(self.owner.current_file.filename)
deffile = '%s_%s' % (fname, aname)
deffile = deffile.replace('.', '_') + '.dat'
outfile = FileSave(self, 'Save Area XRF Statistics File',
default_file=deffile,
wildcard=FILE_WILDCARDS)
if outfile is None:
return
area = self.owner.current_file.xrmmap['areas/%s' % aname]
npix = area[()].sum()
pixtime = self.owner.current_file.pixeltime
mca = self.owner.current_file.get_mca_area(aname)
dtime = mca.real_time
info_fmt = '%i Pixels, %i ms/pixel, %.3f total seconds'
buff = ['# Map %s, Area %s' % (self.owner.current_file.filename, aname),
'# %i Pixels' % npix,
'# %i ms per pixel' % int(round(1000.0*pixtime)),
'# %.3f total seconds' % dtime,
'# Time (TSCALER) in ms',
'# All other values in counts per second',
'#----------------------------------',
'# ROI Min Max Mean Sigma Median Mode']
for dat in self.report_data:
buff.append(' '.join(dat))
buff.append('')
try:
fout = open(outfile, 'w')
fout.write('\n'.join(buff))
fout.close()
except IOError:
print('could not write %s' % outfile)
def _getarea(self):
return self.choices[self.choice.GetStringSelection()]
def onExport(self, event=None):
ofile = self.owner.current_file.export_areas()
self.owner.message('Exported Areas to %s' % ofile)
    def onImport(self, event=None):
        """Import areas from a *_Areas.npz file into the current map file
        and refresh the area selector."""
        wildcards = 'Area Files (*_Areas.npz)|*_Areas.npz|All files (*.*)|*.*'
        dlg = wx.FileDialog(self, message='Read Areas File',
                            defaultDir=os.getcwd(),
                            wildcard=wildcards, style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            # normalize Windows path separators
            fname = dlg.GetPath().replace('\\', '/')
            self.owner.current_file.import_areas(fname)
            self.owner.message('Imported Areas from %s' % fname)
            self.set_area_choices(self.owner.current_file.xrmmap)
            self.onSelect()
    def onSelect(self, event=None):
        """Show pixel count, total dwell time, and pixel bounding box for
        the selected area; display cached ROI stats if present."""
        try:
            aname = self._getarea()
        except:
            # no areas defined / nothing selected yet
            return
        area = self.owner.current_file.xrmmap['areas/%s' % aname]
        npix = area[()].sum()
        yvals, xvals = np.where(area[()])
        pixtime = self.owner.current_file.pixeltime
        dtime = npix*pixtime
        info1_fmt = '%i Pixels, %.3f seconds'
        info2_fmt = ' Range (pixels) X: [%i:%i], Y: [%i:%i] '
        self.info1.SetLabel(info1_fmt % (npix, dtime))
        self.info2.SetLabel(info2_fmt % (xvals.min(), xvals.max(),
                                         yvals.min(), yvals.max()))
        self.desc.SetValue(area.attrs.get('description', aname))
        self.report.DeleteAllItems()
        self.report_data = []
        # only show stats that were already computed and cached
        if 'roistats' in area.attrs:
            self.show_stats()
def onShowStats(self, event=None):
if self.report is None:
return
self.show_stats()
def onLabel(self, event=None):
aname = self._getarea()
area = self.owner.current_file.xrmmap['areas/%s' % aname]
new_label = str(self.desc.GetValue())
area.attrs['description'] = new_label
self.owner.current_file.h5root.flush()
self.set_area_choices(self.owner.current_file.xrmmap)
self.choice.SetStringSelection(new_label)
self.desc.SetValue(new_label)
    def onShow(self, event=None):
        """Highlight the selected area on the most recent tomography
        display and/or map image display."""
        aname = self._getarea()
        area = self.owner.current_file.xrmmap['areas'][aname]
        label = bytes2str(area.attrs.get('description', aname))
        if len(self.owner.tomo_displays) > 0:
            imd = self.owner.tomo_displays[-1]
            try:
                imd.add_highlight_area(area[()], label=label)
            except:
                # display may have been closed; ignore
                pass
        if len(self.owner.im_displays) > 0:
            imd = self.owner.im_displays[-1]
            h, w = self.owner.current_file.get_shape()
            # build a full-size 0/1 overlay from the boolean area mask
            highlight = np.zeros((h, w))
            highlight[np.where(area[()])] = 1
            imd.panel.add_highlight_area(highlight, label=label)
def onDelete(self, event=None):
aname = self._getarea()
erase = (wx.ID_YES == Popup(self.owner, self.delstr % aname,
'Delete Area?', style=wx.YES_NO))
if erase:
xrmmap = self.owner.current_file.xrmmap
del xrmmap['areas/%s' % aname]
self.set_area_choices(xrmmap)
self.onSelect()
    def onClear(self, event=None):
        """Remove all area highlights from the most recent map and
        tomography displays (best-effort; a display may be closed)."""
        if len(self.owner.im_displays) > 0:
            imd = self.owner.im_displays[-1]
            try:
                # remove each highlight's matplotlib artists, then redraw
                for area in imd.panel.conf.highlight_areas:
                    for w in area.collections + area.labelTexts:
                        w.remove()
                imd.panel.conf.highlight_areas = []
                imd.panel.redraw()
            except:
                pass
        if len(self.owner.tomo_displays) > 0:
            imd = self.owner.tomo_displays[-1]
            try:
                imd.clear_highlight_area()
            except:
                pass
    def onXRF(self, event=None, as_mca2=False):
        """Extract the summed XRF spectrum for the selected area (in a
        worker thread) and plot it: foreground by default, background
        when as_mca2 is True."""
        aname = self._getarea()
        xrmfile = self.owner.current_file
        area = xrmfile.xrmmap['areas/%s' % aname]
        label = bytes2str(area.attrs.get('description', aname))
        self._mca = None
        self.owner.message("Getting XRF Spectra for area '%s'..." % aname)

        def _getmca_area(aname):
            # worker: store the summed MCA spectrum on self._mca
            o = self.owner
            self._mca = o.current_file.get_mca_area(aname,
                                                    dtcorrect=o.dtcor)

        mca_thread = Thread(target=_getmca_area, args=(aname,))
        mca_thread.start()
        # raise the XRF display while the worker runs, then wait for it
        self.owner.show_XRFDisplay()
        mca_thread.join()

        pref, fname = os.path.split(self.owner.current_file.filename)
        npix = area[()].sum()
        self._mca.filename = fname
        self._mca.title = label
        self._mca.npixels = npix
        self.owner.message("Plotting XRF Spectra for area '%s'..." % aname)
        self.owner.xrfdisplay.add_mca(self._mca, label="%s:%s" % (fname, label),
                                      plot=not as_mca2)
        if as_mca2:
            # loaded as foreground above; swap so it becomes the background
            self.owner.xrfdisplay.swap_mcas()
    def onXRD(self, event=None, save=False, show=False,
              xrd1d=False, xrd2d=False, verbose=True):
        """Show and/or save the 1D and/or 2D XRD pattern summed over the
        selected area; the keyword flags select which pattern(s) and
        which action(s)."""
        try:
            aname = self._getarea()
            xrmfile = self.owner.current_file
            area = xrmfile.xrmmap['areas/%s' % aname]
            title = area.attrs.get('description', aname)
            # look up the mono energy (keV) from the saved environment
            env_names = list(xrmfile.xrmmap['config/environ/name'])
            env_vals = list(xrmfile.xrmmap['config/environ/value'])
            for name, val in zip(env_names, env_vals):
                if 'mono.energy' in str(name).lower():
                    energy = float(val)/1000.
        except:
            if verbose:
                print('No map file and/or areas specified.')
            return

        xrmfile.reset_flags()
        if not xrmfile.has_xrd1d and not xrmfile.has_xrd2d:
            if verbose:
                print('No XRD data in map file: %s' % self.owner.current_file.filename)
            return

        # pyFAI calibration file, if it exists on disk
        ponifile = bytes2str(xrmfile.xrmmap['xrd1d'].attrs.get('calfile',''))
        ponifile = ponifile if os.path.exists(ponifile) else None
        if show:
            self.owner.message('Plotting XRD pattern for \'%s\'...' % title)
        if save:
            self.owner.message('Saving XRD pattern for \'%s\'...' % title)
            path,stem = os.path.split(self.owner.current_file.filename)
            stem = '%s_%s' % (stem,title)

        kwargs = dict(filename=self.owner.current_file.filename,
                      npixels = area[()].sum(),
                      energy = 0.001*xrmfile.get_incident_energy(),
                      calfile = ponifile, title = title, xrd2d=False)

        if xrd1d and xrmfile.has_xrd1d:
            self._xrd = xrmfile.get_xrd1d_area(aname, **kwargs)
            if show:
                label = '%s: %s' % (os.path.split(self._xrd.filename)[-1], title)
                self.owner.display_xrd1d(self._xrd.data1D, self._xrd.q,
                                         self._xrd.energy, label=label)
            if save:
                wildcards = '1D XRD file (*.xy)|*.xy|All files (*.*)|*.*'
                dlg = wx.FileDialog(self, 'Save file as...',
                                    defaultDir=os.getcwd(),
                                    defaultFile='%s.xy' % stem,
                                    wildcard=wildcards,
                                    style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
                if dlg.ShowModal() == wx.ID_OK:
                    filename = dlg.GetPath().replace('\\', '/')
                dlg.Destroy()
                # NOTE(review): if the save dialog is cancelled, `filename`
                # is unbound here and this raises NameError -- confirm and
                # guard on the dialog result
                print('\nSaving 1D XRD in file: %s' % (filename))
                save1D(filename, self._xrd.data1D[0], self._xrd.data1D[1], calfile=ponifile)
            ## turns off flag since it has already been displayed/saved
            xrd1d = False

        if xrd2d:
            print("Looking for 2D XRD Data")
            try:
                _xrd = xrmfile.get_xrd2d_area(aname, **kwargs)
            except:
                _xrd = None
            if _xrd is None:
                print("no 2D XRD Data")
                return
            label = '%s: %s' % (os.path.split(_xrd.filename)[-1], title)
            self.owner.display_2Dxrd(_xrd.data2D, label=label, xrmfile=xrmfile)
            wildcards = '2D XRD file (*.tiff)|*.tif;*.tiff;*.edf|All files (*.*)|*.*'
            fname = xrmfile.filename + '_' + aname
            dlg = wx.FileDialog(self, 'Save file as...',
                                defaultDir=os.getcwd(),
                                defaultFile='%s.tiff' % fname,
                                wildcard=wildcards,
                                style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
            if dlg.ShowModal() == wx.ID_OK:
                filename = os.path.abspath(dlg.GetPath().replace('\\', '/'))
                _xrd.save_2D(file=filename, verbose=True)
            dlg.Destroy()
class MapViewerFrame(wx.Frame):
    """Main GSE XRM MapViewer window: manages open map files, the notebook
    of analysis panels, and the XRF/XRD/image child display frames."""

    # menu label + help text for the lasso cursor mode on image displays
    cursor_menulabels = {'lasso': ('Select Points for XRF Spectra\tCtrl+X',
                                   'Left-Drag to select points for XRF Spectra')}
    def __init__(self, parent=None, filename=None, _larch=None,
                 use_scandb=False, version_info=None,
                 size=(925, 650), **kwds):
        """Create the main frame; optionally open *filename* once the
        event loop is running (via wx.CallAfter)."""
        kwds['style'] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, parent, -1, size=size, **kwds)

        self.data = None
        self.use_scandb = use_scandb
        self.filemap = {}        # short filename -> GSEXRM_MapFile
        self.im_displays = []    # open map image frames
        self.tomo_displays = []  # open tomography frames
        self.plot_displays = []
        self.current_file = None
        # reuse the parent as the larch buffer only if it is one
        self.larch_buffer = parent
        if not isinstance(parent, LarchFrame):
            self.larch_buffer = LarchFrame(_larch=_larch, is_standalone=False, with_raise=False)
        self.larch = self.larch_buffer.larchshell
        self.xrfdisplay = None
        self.xrddisplay1D = None
        self.xrddisplay2D = None
        self.watch_files = False
        self.files_in_progress = []
        # self.hotcols = False
        self.dtcor = True
        self.showxrd = False
        self.SetTitle('GSE XRM MapViewer')
        self.createMainPanel()
        self.SetFont(Font(FONTSIZE))
        self.createMenus()
        self.statusbar = self.CreateStatusBar(2, 0)
        self.statusbar.SetStatusWidths([-3, -1])
        statusbar_fields = ['Initializing....', ' ']
        for i in range(len(statusbar_fields)):
            self.statusbar.SetStatusText(statusbar_fields[i], i)
        # timer and flags used while converting a map folder to HDF5
        self.htimer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.onTimer, self.htimer)
        self.h5convert_done = True
        self.h5convert_irow = 0
        self.h5convert_nrow = 0
        read_workdir('gsemap.dat')
        w0, h0 = self.GetSize()
        w1, h1 = self.GetBestSize()
        self.SetSize((max(w0, w1)+5, max(h0, h1)+5))
        self.SetMinSize((500, 300))
        self.Show()
        self.scandb = None
        self.instdb = None
        self.inst_name = None
        self.move_callback = None
        if version_info is not None:
            if version_info.update_available:
                self.onCheckforUpdates()
        if filename is not None:
            wx.CallAfter(self.onRead, filename)
def CloseFile(self, filename, event=None):
if filename in self.filemap:
self.filemap[filename].close()
self.filemap.pop(filename)
    def createMainPanel(self):
        """Build the splitter: file list on the left, notebook of analysis
        panels on the right."""
        splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
        splitter.SetMinimumPaneSize(250)
        self.filelist = EditableListBox(splitter, self.ShowFile,
                                        remove_action=self.CloseFile,
                                        size=(250, -1))
        dpanel = self.detailspanel = wx.Panel(splitter)
        self.createNBPanels(dpanel)
        splitter.SplitVertically(self.filelist, self.detailspanel, 1)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(splitter, 1, wx.GROW|wx.ALL, 5)
        pack(self, sizer)
        fico = os.path.join(icondir, XRF_ICON_FILE)
        try:
            self.SetIcon(wx.Icon(fico, wx.BITMAP_TYPE_ICO))
        except:
            # missing/unreadable icon is not fatal
            pass
        self.Raise()
        # defer larch initialization until the UI is shown
        wx.CallAfter(self.init_larch)
    def createNBPanels(self, parent):
        """Create the notebook holding the analysis panels (map, info,
        areas, math, tomography, XRF analysis)."""
        self.title = SimpleText(parent, 'initializing...', size=(680, -1))
        self.SetBackgroundColour('#F0F0E8')
        nbpanels = OrderedDict()
        for panel in (MapPanel, MapInfoPanel, MapAreaPanel, MapMathPanel,
                      TomographyPanel, XRFAnalysisPanel):
            nbpanels[panel.label] = panel
        self.nb = flatnotebook(parent, nbpanels, panelkws={'owner':self},
                               on_change=self.onNBChanged)
        # first page (MapPanel) provides the processing-row selector
        self.roimap_panel = self.nb.GetPage(0)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.title, 0, ALL_CEN)
        sizer.Add(self.nb, 1, wx.ALL|wx.EXPAND)
        parent.SetSize((700, 400))
        pack(parent, sizer)
def onNBChanged(self, event=None):
cb = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(cb):
cb()
def get_mca_area(self, mask, xoff=0, yoff=0, det=None, xrmfile=None):
if xrmfile is None:
xrmfile = self.current_file
aname = xrmfile.add_area(mask)
self.sel_mca = xrmfile.get_mca_area(aname, det=det)
    def lassoHandler(self, mask=None, xrmfile=None, xoff=0, yoff=0,
                     det=None, **kws):
        """Handle a lasso selection on a map display: align the mask to
        the (ny, nx) map shape, save it as an area, and plot the summed
        XRF spectrum for the selected pixels."""
        if xrmfile is None:
            xrmfile = self.current_file
        ny, nx = xrmfile.get_shape()
        if mask.sum() < 1:
            return
        # the mask may be offset and/or transposed (sinogram) relative to
        # the map; re-embed it into a full-size boolean mask
        if (xoff>0 or yoff>0) or mask.shape != (ny, nx):
            if mask.shape == (nx, ny): ## sinogram
                mask = np.swapaxes(mask,0,1)
            # elif mask.shape == (ny, ny) or mask.shape == (nx, nx): ## tomograph
            #     tomo = True
            else:
                ym, xm = mask.shape
                tmask = np.zeros((ny, nx)).astype(bool)
                xmax = min(nx, xm+xoff)
                for iy in range(ym):
                    if iy+yoff < ny:
                        tmask[iy+yoff, xoff:xmax] = mask[iy]
                mask = tmask
        # fetch the MCA sum in a worker thread while the display raises
        kwargs = dict(xrmfile=xrmfile, xoff=xoff, yoff=yoff, det=det)
        mca_thread = Thread(target=self.get_mca_area,
                            args=(mask,), kwargs=kwargs)
        mca_thread.start()
        self.show_XRFDisplay()
        mca_thread.join()
        if hasattr(self, 'sel_mca'):
            path, fname = os.path.split(xrmfile.filename)
            aname = self.sel_mca.areaname
            area = xrmfile.xrmmap['areas/%s' % aname]
            npix = area[()].sum()
            self.sel_mca.filename = fname
            self.sel_mca.title = aname
            self.sel_mca.npixels = npix
            self.xrfdisplay.add_mca(self.sel_mca, label='%s:%s'% (fname, aname),
                                    plot=True)
            # let the current notebook page pick up the new area
            update_xrmmap = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
            if callable(update_xrmmap):
                update_xrmmap(xrmfile=self.current_file)
            if self.showxrd:
                for page in self.nb.pagelist:
                    if hasattr(page, 'onXRD'):
                        page.onXRD(show=True, xrd1d=True,verbose=False)
    def show_XRFDisplay(self, do_raise=True, clear=True, xrmfile=None):
        'make sure XRF plot frame is enabled and visible'
        if xrmfile is None:
            xrmfile = self.current_file
        if self.xrfdisplay is None:
            self.xrfdisplay = XRFDisplayFrame(parent=self.larch_buffer,
                                              _larch=self.larch)
        try:
            self.xrfdisplay.Show()
        except:
            # frame was destroyed by the user; recreate it
            self.xrfdisplay = XRFDisplayFrame(parent=self.larch_buffer,
                                              _larch=self.larch)
            self.xrfdisplay.Show()
        if do_raise:
            self.xrfdisplay.Raise()
        if clear:
            self.xrfdisplay.panel.clear()
            self.xrfdisplay.panel.reset_config()
    def onMoveToPixel(self, xval, yval):
        """Move the sample-stage motors to map position (xval, yval) after
        user confirmation; no-op when epics support is unavailable."""
        if not HAS_EPICS:
            return
        xrmmap = self.current_file.xrmmap
        pos_addrs = [str(x) for x in xrmmap['config/positioners'].keys()]
        pos_label = [str(x[()]) for x in xrmmap['config/positioners'].values()]
        # PV addresses of the two scanned positioners
        pos1 = h5str(xrmmap['config/scan/pos1'][()])
        pos2 = h5str(xrmmap['config/scan/pos2'][()])
        i1 = pos_addrs.index(pos1)
        i2 = pos_addrs.index(pos2)
        msg = '%s(%s) = %.4f, %s(%s) = %.4f?' % (pos_label[i1], pos_addrs[i1], xval,
                                                 pos_label[i2], pos_addrs[i2], yval)
        if (wx.ID_YES == Popup(self, 'Really move stages to\n %s?' % msg,
                               'move stages to pixel?', style=wx.YES_NO)):
            caput(pos_addrs[i1], xval)
            caput(pos_addrs[i2], yval)
def onSavePixel(self, name, ix, iy, x=None, y=None, title=None, xrmfile=None):
'save pixel as area, and perhaps to scandb'
if x is None:
x = float(xrmfile.get_pos(0, mean=True)[ix])
if y is None:
y = float(xrmfile.get_pos(1, mean=True)[iy])
if len(name) < 1:
return
if xrmfile is None:
xrmfile = self.current_file
# first, create 1-pixel mask for area, and save that
ny, nx = xrmfile.get_shape()
tmask = np.zeros((ny, nx)).astype(bool)
tmask[int(iy), int(ix)] = True
xrmfile.add_area(tmask, name=name)
# for page in self.nb.pagelist:
# if hasattr(page, 'update_xrmmap'):
# page.update_xrmmap(xrmfile=xrmfile)
update_xrmmap = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
if callable(update_xrmmap):
update_xrmmap(xrmfile=xrmfile)
# show position on map
self.im_displays[-1].panel.add_highlight_area(tmask, label=name)
# make sure we can save position into database
if self.scandb is None or self.instdb is None:
return
samplestage = self.instdb.get_instrument(self.inst_name)
if samplestage is None:
return
allpvs = [pv.name for pv in samplestage.pv]
pvn = pv_fullname
conf = xrmfile.xrmmap['config']
pos_addrs = [pvn(h5str(tval)) for tval in conf['positioners']]
env_addrs = [pvn(h5str(tval)) for tval in conf['environ/address']]
env_vals = [h5str(tval) for tval in conf['environ/value']]
position = {}
for pv in allpvs:
position[pv] = None
for addr, val in zip(env_addrs, env_vals):
if addr in allpvs:
position[addr] = float(val)
position[pvn(h5str(conf['scan/pos1'][()]))] = x
position[pvn(h5str(conf['scan/pos2'][()]))] = y
notes = {'source': '%s: %s' % (xrmfile.filename, name)}
self.instdb.save_position(self.inst_name, name, position,
notes=json.dumps(notes))
def add_tomodisplay(self, title, det=None, _lassocallback=True):
if _lassocallback:
lasso_cb = partial(self.lassoHandler, det=det)
else:
lasso_cb = None
imframe = MapImageFrame(output_title=title,
lasso_callback=lasso_cb)
self.tomo_displays.append(imframe)
    def display_tomo(self, tomo, title='', info='', x=None, y=None, xoff=0,
                     yoff=0, det=None, subtitles=None, xrmfile=None,
                     _lassocallback=True):
        """Display a tomographic reconstruction, reusing the most recent
        tomography frame when possible, else creating a new one."""
        displayed = False
        if _lassocallback:
            lasso_cb = partial(self.lassoHandler, det=det, xrmfile=xrmfile)
        else:
            lasso_cb = None
        while not displayed:
            try:
                # reuse the last frame, keeping its contrast level
                tmd = self.tomo_displays.pop()
                clevel = tmd.panel.conf.contrast_level
                if clevel in (0, None):
                    clevel = 0.5
                tmd.display(tomo, title=title, subtitles=subtitles,
                            contrast_level=clevel)
                tmd.lasso_callback = lasso_cb
                displayed = True
            except IndexError:
                # no frame available: create a fresh one
                tmd = MapImageFrame(output_title=title,
                                    lasso_callback=lasso_cb)
                tmd.display(tomo, title=title, subtitles=subtitles,
                            contrast_level=0.5)
                displayed = True
            except:
                # stale/destroyed frame: loop and try the next one
                displayed = False
        self.tomo_displays.append(tmd)
        tmd.SetStatusText(info, 1)
        tmd.Show()
        tmd.Raise()
def add_imdisplay(self, title, det=None):
imd = MapImageFrame(output_title=title,
lasso_callback=partial(self.lassoHandler, det=det),
cursor_labels=self.cursor_menulabels,
save_callback=self.onSavePixel)
self.im_displays.append(imd)
return imd
def display_map(self, map, title='', info='', x=None, y=None, xoff=0, yoff=0,
det=None, subtitles=None, xrmfile=None, with_savepos=True):
"""display a map in an available image display"""
if xrmfile is None:
hotcols = False
else:
hotcols = xrmfile.hotcols
if x is not None:
zigzag = abs(xrmfile.zigzag)
if zigzag != 0:
x = x[zigzag:-zigzag]
elif hotcols and map.shape[1] != x.shape[0]:
x = x[1:-1]
dopts = dict(title=title, x=x, y=y, xoff=xoff, yoff=yoff,
det=det, subtitles=subtitles,
xrmfile=xrmfile, with_savepos=with_savepos)
displayed = False
while not displayed:
if 'contrast_level' not in dopts:
dopts['contrast_level'] = 0.5
if len(self.im_displays) == 0:
imd = self.add_imdisplay(title=title, det=det)
imd.display(map, **dopts)
else:
try:
imd = self.im_displays[-1]
if imd.panel.conf.contrast_level not in (0, None):
dopts['contrast_level'] = imd.panel.conf.contrast_level
imd.display(map, **dopts)
displayed = True
except IndexError:
pass
except:
self.im_displays.pop()
imd.SetStatusText(info, 1)
imd.Show()
imd.Raise()
    def display_2Dxrd(self, map, label='image 0', xrmfile=None, flip=True):
        '''
        displays 2D XRD pattern in diFFit viewer
        '''
        flptyp = 'vertical' if flip is True else False
        # pyFAI calibration file, if present on disk
        poni = bytes2str(self.current_file.xrmmap['xrd1d'].attrs.get('calfile',''))
        if not os.path.exists(poni):
            poni = None
        if self.xrddisplay2D is None:
            self.xrddisplay2D = XRD2DViewerFrame(_larch=self.larch,flip=flptyp,
                                                 xrd1Dviewer=self.xrddisplay1D,
                                                 ponifile=poni)
        try:
            self.xrddisplay2D.plot2Dxrd(label,map)
        except:
            # viewer was closed; recreate it and retry
            self.xrddisplay2D = XRD2DViewerFrame(_larch=self.larch,flip=flptyp,
                                                 xrd1Dviewer=self.xrddisplay1D)
            self.xrddisplay2D.plot2Dxrd(label,map)
        self.xrddisplay2D.Show()
    def display_xrd1d(self, counts, q, energy, label='dataset 0', xrmfile=None):
        '''
        displays 1D XRD pattern in diFFit viewer
        '''
        wavelength = lambda_from_E(energy, E_units='keV')
        xdat = xrd1d(label=label, energy=energy, wavelength=wavelength)
        xdat.set_xy_data(np.array([q, counts]), 'q')
        if self.xrddisplay1D is None:
            self.xrddisplay1D = XRD1DViewerFrame(_larch=self.larch)
        try:
            self.xrddisplay1D.xrd1Dviewer.add1Ddata(xdat)
            self.xrddisplay1D.Show()
        except:
            # viewer was closed; recreate it, re-add the data and show
            self.xrddisplay1D = XRD1DViewerFrame(_larch=self.larch)
            self.xrddisplay1D.xrd1Dviewer.add1Ddata(xdat)
            self.xrddisplay1D.Show()
    def init_larch(self):
        """Finish startup after the UI is shown: bind the larch symbol
        table and, when scan credentials exist, connect to the scan
        database to enable stage-move support."""
        self.SetStatusText('ready')
        self.datagroups = self.larch.symtable
        if ESCAN_CRED is not None:
            self.move_callback = self.onMoveToPixel
            try:
                self.scandb = connect_scandb(_larch=self.larch)
                self.instdb = self.larch.symtable._scan._instdb
                self.inst_name = self.scandb.get_info('samplestage_instrument',
                                                      default='SampleStage')
                print(" ScanDB: %s, Instrument=%s" % (self.scandb.engine, self.inst_name))
            except:
                # scandb is optional; fall back to no-database operation
                etype, emsg, tb = sys.exc_info()
                print('Could not connect to ScanDB: %s' % (emsg))
                self.scandb = self.instdb = None
        wx.CallAfter(self.onFolderSelect)
    def ShowFile(self, evt=None, filename=None, process_file=True, **kws):
        """Make *filename* the current file, process any new rows from its
        map folder (when this process owns the file), and refresh the
        current notebook page."""
        if filename is None and evt is not None:
            filename = evt.GetString()
        if not self.h5convert_done or filename not in self.filemap:
            return
        self.current_file = self.filemap[filename]
        if (self.check_ownership(filename) and
            self.current_file.folder_has_newdata()):
            if process_file:
                mnew = self.roimap_panel.mapproc_nrows.GetStringSelection()
                try:
                    mnew = int(mnew)
                except:
                    # non-numeric selection: process without a row limit
                    mnew = None
                self.process_file(filename, max_new_rows=mnew)
        ny, nx = self.current_file.get_shape()
        self.title.SetLabel('%s: (%i x %i)' % (filename, nx, ny))
        fnames = self.filelist.GetItems()
        # refresh the current page; also hand it the file list if it wants it
        cb = getattr(self.nb.GetCurrentPage(), 'update_xrmmap', None)
        if callable(cb):
            cb(xrmfile=self.current_file)
        cb = getattr(self.nb.GetCurrentPage(), 'set_file_choices', None)
        if callable(cb):
            cb(fnames)
    def createMenus(self):
        """Build the File / ROIs / Help menu bar and bind window close."""
        self.menubar = wx.MenuBar()
        fmenu = wx.Menu()
        MenuItem(self, fmenu, '&Open XRM Map File\tCtrl+O', 'Read XRM Map File', self.onReadFile)
        MenuItem(self, fmenu, '&Open XRM Map Folder\tCtrl+F', 'Read XRM Map Folder', self.onReadFolder)
        fmenu.AppendSeparator()
        MenuItem(self, fmenu, 'Change &Working Folder', 'Choose working directory',
                 self.onFolderSelect)
        MenuItem(self, fmenu, 'Show Larch Buffer\tCtrl+L', 'Show Larch Programming Buffer',
                 self.onShowLarchBuffer)
        # cmenu = fmenu.Append(-1, '&Watch HDF5 Files\tCtrl+W', 'Watch HDF5 Files', kind=wx.ITEM_CHECK)
        # fmenu.Check(cmenu.Id, self.watch_files) ## False
        # self.Bind(wx.EVT_MENU, self.onWatchFiles, id=cmenu.Id)
        fmenu.AppendSeparator()
        MenuItem(self, fmenu, '&Quit\tCtrl+Q',
                 'Quit program', self.onClose)
        rmenu = wx.Menu()
        MenuItem(self, rmenu, 'Define new ROI',
                 'Define new ROI', self.defineROI)
        MenuItem(self, rmenu, 'Load ROI File for 1DXRD',
                 'Load ROI File for 1DXRD', self.add1DXRDFile)
        rmenu.AppendSeparator()
        MenuItem(self, rmenu, 'Load XRD calibration file',
                 'Load XRD calibration file', self.openPONI)
        MenuItem(self, rmenu, 'Add 1DXRD for HDF5 file',
                 'Calculate 1DXRD for HDF5 file', self.add1DXRD)
        # cmenu = fmenu.Append(-1, 'Display 1DXRD for areas',
        #                      'Display 1DXRD for areas',
        #                      kind=wx.ITEM_CHECK)
        #fmenu.Check(cmenu.Id, self.showxrd) ## False
        #self.Bind(wx.EVT_MENU, self.onShow1DXRD, id=cmenu.Id)
        hmenu = wx.Menu()
        MenuItem(self, hmenu, 'About GSE XRM MapViewer', 'About GSE XRM MapViewer',
                 self.onAbout)
        MenuItem(self, hmenu, 'Check for Updates', 'Check for Updates',
                 self.onCheckforUpdates)
        self.menubar.Append(fmenu, '&File')
        self.menubar.Append(rmenu, '&ROIs')
        self.menubar.Append(hmenu, '&Help')
        self.SetMenuBar(self.menubar)
        self.Bind(wx.EVT_CLOSE, self.onClose)
def onShowLarchBuffer(self, evt=None):
if self.larch_buffer is None:
self.larch_buffer = LarchFrame(_larch=self.larch, is_standalone=False)
self.larch_buffer.Show()
self.larch_buffer.Raise()
    def onFolderSelect(self, evt=None):
        """Let the user choose the working directory, chdir into it, and
        persist the choice via save_workdir."""
        dlg = wx.DirDialog(self, 'Select Working Directory:',
                           os.getcwd(),
                           style=wx.DD_DIR_MUST_EXIST|wx.DD_DEFAULT_STYLE)
        if dlg.ShowModal() == wx.ID_OK:
            basedir = os.path.abspath(str(dlg.GetPath()))
            try:
                if len(basedir) > 0:
                    os.chdir(nativepath(basedir))
                    save_workdir(nativepath(basedir))
            except OSError:
                print( 'Changed folder failed')
                pass
            save_workdir('gsemap.dat')
        dlg.Destroy()
def onAbout(self, event=None):
info = AboutDialogInfo()
info.SetName('GSE XRM MapViewer')
info.SetDescription('X-ray Microprobe Mapping Data Visualization and Analysis')
info.SetVersion(larch.version.__version__)
info.AddDeveloper('Matthew Newville: newville at cars.uchicago.edu')
dlg = AboutBox(info)
    def onCheckforUpdates(self, event=None):
        """Show the updater dialog; if the user accepts, install updates
        and close the application without prompting again."""
        dlg = LarchUpdaterDialog(self, caller='GSE MapViewer')
        dlg.Raise()
        dlg.SetWindowStyle(wx.STAY_ON_TOP)
        res = dlg.GetResponse()
        dlg.Destroy()
        if res.ok and res.run_updates:
            from larch.apps import update_larch
            update_larch()
            self.onClose(evt=event, prompt=False)
    def onClose(self, evt=None, prompt=True):
        """Shut down: optionally confirm, save the working directory,
        close all map files and child display frames, then destroy."""
        if prompt:
            dlg = wx.MessageDialog(None, 'Really Quit?', 'Question',
                                   wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
            ret = dlg.ShowModal()
            if ret != wx.ID_YES:
                return
        save_workdir('gsemap.dat')
        for xrmfile in self.filemap.values():
            try:
                xrmfile.close()
            except KeyError:
                pass
        ## Closes maps, 2D XRD image
        for disp in self.im_displays + self.plot_displays + self.tomo_displays:
            try:
                disp.Destroy()
            except:
                pass
        # each display frame may already be gone; ignore failures
        try:
            self.xrfdisplay.Destroy()
        except:
            pass
        try:
            self.xrddisplay1D.Destroy()
        except:
            pass
        try:
            self.xrddisplay2D.Destroy()
        except:
            pass
        wx.CallAfter(self.larch.symtable._plotter.close_all_displays)
        if self.larch_buffer is not None:
            try:
                self.larch_buffer.Show()
                self.larch_buffer.onExit(force=True)
            except:
                pass
        self.Destroy()
def onReadFile(self, evt=None):
    """Prompt for one or more XRM map (HDF5) files and open each of them."""
    if not self.h5convert_done:
        # refuse while a background folder->HDF5 conversion is running
        print('cannot open file while processing a map folder')
        return
    dlg = wx.FileDialog(self, message='Read XRM Map File',
                        defaultDir=os.getcwd(),
                        wildcard=FILE_WILDCARDS,
                        style=wx.FD_OPEN|wx.FD_MULTIPLE)
    path, read = None, False
    if dlg.ShowModal() == wx.ID_OK:
        read = True
        # normalize Windows path separators
        paths = [p.replace('\\', '/') for p in dlg.GetPaths()]
    dlg.Destroy()
    if not read:
        return
    for path in paths:
        parent, fname = os.path.split(path)
        read = True
        if fname in self.filemap:
            # already open: confirm before re-reading
            read = (wx.ID_YES == Popup(self, "Re-read file '%s'?" % path,
                                       'Re-read file?', style=wx.YES_NO))
        if read:
            xrmfile = GSEXRM_MapFile(filename=str(path), scandb=self.scandb)
            self.add_xrmfile(xrmfile)
def onRead(self, path):
    """Read and install an XRM map file from *path* (no dialogs)."""
    self.add_xrmfile(GSEXRM_MapFile(filename=str(path), scandb=self.scandb))
def onReadFolder(self, evt=None):
    """Prompt for a raw XRM map folder and build/open its HDF5 map file.

    BUG FIX: previously, cancelling the directory dialog left ``folder``
    unbound and the subsequent ``GSEXRM_MapFile(folder=folder, ...)`` call
    raised NameError.  Now a cancelled dialog simply returns.
    """
    if not self.h5convert_done:
        # refuse while a background folder->HDF5 conversion is running
        print( 'cannot open file while processing a map folder')
        return
    dlg = wx.DirDialog(self, message='Read XRM Map Folder',
                       defaultPath=os.getcwd(),
                       style=wx.DD_DIR_MUST_EXIST|wx.DD_DEFAULT_STYLE)
    folder = None
    if dlg.ShowModal() == wx.ID_OK:
        folder = os.path.abspath(dlg.GetPath())
    dlg.Destroy()
    if folder is None:
        return
    xrmfile = GSEXRM_MapFile(folder=folder, scandb=self.scandb)
    self.add_xrmfile(xrmfile)
def add_xrmfile(self, xrmfile):
    """Register an opened GSEXRM_MapFile with the viewer.

    Installs it under the next free ``mapNNN`` data-group name (or re-uses
    the group already holding a file with the same name), adds it to the
    file list, kicks off processing if we own the HDF5 file, and makes it
    the displayed file.
    """
    parent, fname = os.path.split(xrmfile.filename)
    # look for group with this name or for next available group
    for i in range(1000):
        gname = 'map%3.3i' % (i+1)
        xgroup = getattr(self.datagroups, gname, None)
        if xgroup is None:
            break
        gpar, gfname = os.path.split(xgroup.filename)
        if gfname == fname:
            break
    setattr(self.datagroups, gname, xrmfile)
    xrmfile.groupname = gname
    if fname not in self.filemap:
        self.filemap[fname] = xrmfile
    if fname not in self.filelist.GetItems():
        self.filelist.Append(fname)
    self.filelist.SetStringSelection(fname)
    if self.check_ownership(fname):
        # max rows per processing pass, from the ROI-map panel selection
        mnew = self.roimap_panel.mapproc_nrows.GetStringSelection()
        try:
            mnew = int(mnew)
        except:
            mnew = None  # e.g. "all" selection: no row limit
        self.process_file(fname, max_new_rows=mnew)
    self.ShowFile(filename=fname)
    if parent is not None and len(parent) > 0:
        # follow the file's directory as the new working dir (best effort)
        try:
            os.chdir(nativepath(parent))
            save_workdir(nativepath(parent))
        except:
            pass
def openPONI(self, evt=None):
    """Prompt for an XRD calibration (.poni) file and attach it to the
    current map file, refreshing the active notebook page.

    mkak 2016.07.21
    """
    if len(self.filemap) > 0:
        myDlg = OpenPoniFile()
        read = False
        if myDlg.ShowModal() == wx.ID_OK:
            read = True
            path = myDlg.XRDInfo[1].GetValue()
            # selection 1 is the pyFAI convention (no vertical flip)
            flip = False if myDlg.XRDInfo[0].GetSelection() == 1 else True
        myDlg.Destroy()
        if read:
            self.current_file.add_XRDfiles(xrdcalfile=path, flip=flip)
            update_xrmmap = getattr(self.nb.GetCurrentPage(),
                                    'update_xrmmap', None)
            if callable(update_xrmmap):
                update_xrmmap(xrmfile=self.current_file)
def defineROI(self, event=None):
    """Open the ROI tools dialog; refresh the active page if ROIs changed."""
    if not self.h5convert_done:
        print( 'cannot open file while processing a map folder')
        return
    if len(self.filemap) > 0:
        myDlg = ROIPopUp(self)
        path, read = None, False
        if myDlg.ShowModal() == wx.ID_OK:
            read = True
        myDlg.Destroy()
        if read:
            update_xrmmap = getattr(self.nb.GetCurrentPage(),
                                    'update_xrmmap', None)
            if callable(update_xrmmap):
                update_xrmmap(xrmfile=self.current_file)
def add1DXRDFile(self, event=None):
    """Prompt for a 1D-XRD ROI (.dat) file and load it into the current map."""
    if len(self.filemap) > 0:
        read = False
        wildcards = '1D-XRD ROI file (*.dat)|*.dat|All files (*.*)|*.*'
        dlg = wx.FileDialog(self, message='Select 1D-XRD ROI file',
                            defaultDir=os.getcwd(),
                            wildcard=wildcards,
                            style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            read = True
            path = dlg.GetPath().replace('\\', '/')
        dlg.Destroy()
        if read and os.path.exists(path):
            time.sleep(1)  ## will hopefully allow time for dialog window to close
            self.current_file.read_xrd1D_ROIFile(path)
def add1DXRD(self, event=None):
    """Add 1D-XRD data to the current map, prompting for a calibration
    (.poni) file first if none is recorded in the map's xrd1d group."""
    if len(self.filemap) > 0:
        xrd1Dgrp = ensure_subgroup('xrd1d', self.current_file.xrmmap)
        poni_path = bytes2str(xrd1Dgrp.attrs.get('calfile', ''))
        if not os.path.exists(poni_path):
            self.openPONI()
            # re-read: openPONI may have stored a new calibration path
            poni_path = bytes2str(xrd1Dgrp.attrs.get('calfile', ''))
        if os.path.exists(poni_path):
            self.current_file.add_xrd1d()
def onShow1DXRD(self, event=None):
    """Toggle whether 1D-XRD data is shown for selected areas."""
    self.showxrd = event.IsChecked()
    status = ('Show 1DXRD data for area' if self.showxrd
              else 'Not displaying 1DXRD for area')
    self.message(status)
# def onCorrectDeadtime(self, event=None):
# self.dtcor = event.IsChecked()
# if self.dtcor:
# msg = 'Using deadtime corrected data...'
# else:
# msg = 'Using raw data...'
# self.message(msg)
# ##print(msg)
#
# def onHotColumns(self, event=None):
# self.hotcols = event.IsChecked()
# if self.hotcols:
# msg = 'Ignoring first/last data columns.'
# else:
# msg = 'Using all data columns'
# self.message(msg)
# ##print(msg)
def onWatchFiles(self, event=None):
    """Enable or disable the 10 s timer that watches folders for new data."""
    self.watch_files = event.IsChecked()
    if self.watch_files:
        self.file_timer.Start(10000)
        self.message('Watching Files/Folders for Changes: On')
    else:
        self.file_timer.Stop()
        self.message('Watching Files/Folders for Changes: Off')
def onFileWatchTimer(self, event=None):
    """Timer callback: reprocess the current file when its source folder
    has new data and no processing is already in flight."""
    if self.current_file is not None and len(self.files_in_progress) == 0:
        if self.current_file.folder_has_newdata():
            path, fname = os.path.split(self.current_file.filename)
            self.process_file(fname, max_new_rows=1e6)
def process_file(self, filename, max_new_rows=None, on_complete=None):
    """Request processing of map file.
    This can take awhile, so is done in a separate thread,
    with updates displayed in message bar

    Parameters
    ----------
    filename : str
        Key into ``self.filemap``.
    max_new_rows : int or None
        Limit on how many new rows to process in this pass (None = all).
    on_complete : callable or None
        Invoked when processing finishes (or immediately if no work).
    """
    xrmfile = self.filemap[filename]
    if xrmfile.status == GSEXRM_FileStatus.created:
        xrmfile.initialize_xrmmap(callback=self.updateTimer)
    if xrmfile.dimension is None and isGSEXRM_MapFolder(self.folder):
        xrmfile.read_master()
    if (xrmfile.folder_has_newdata() and self.h5convert_done
        and filename not in self.files_in_progress):
        # mark busy state polled by onTimer/updateTimer
        self.files_in_progress.append(filename)
        self.h5convert_fname = filename
        self.h5convert_done = False
        self.h5convert_oncomplete = on_complete
        self.htimer.Start(500)
        maxrow = None
        if max_new_rows is not None:
            maxrow = max_new_rows + xrmfile.last_row + 1
        ## this calls process function of xrm_mapfile class
        self.h5convert_thread = Thread(target=xrmfile.process,
                                       kwargs={'callback': self.updateTimer,
                                               'maxrow': maxrow})
        self.h5convert_thread.start()
    elif callable(on_complete):
        # nothing to do: still honor the completion callback
        on_complete()
def updateTimer(self, row=None, maxrow=None, filename=None, status=None):
    """Progress callback from the processing thread: record the latest
    row/total/filename and post a status-bar message on the GUI thread."""
    if row is not None:
        self.h5convert_irow = row
    if maxrow is not None:
        self.h5convert_nrow = maxrow
    if filename is not None:
        self.h5convert_fname = filename
    self.h5convert_done = (status == 'complete')
    progress = 'processing %s: row %i of %i' % (self.h5convert_fname,
                                                self.h5convert_irow,
                                                self.h5convert_nrow)
    wx.CallAfter(self.message, progress)
def onTimer(self, event=None):
    """Poll the background conversion; on completion, join the worker
    thread, update the title/notebook page, and fire the completion hook."""
    fname, irow, nrow = self.h5convert_fname, self.h5convert_irow, self.h5convert_nrow
    if self.h5convert_done:
        self.htimer.Stop()
        self.h5convert_thread.join()
        self.files_in_progress = []
        self.message('MapViewer processing %s: complete!' % fname)
        _path, _fname = os.path.split(fname)
        if _fname in self.filemap:
            cfile = self.current_file = self.filemap[_fname]
            ny, nx = cfile.get_shape()
            self.title.SetLabel('%s: (%i x %i)' % (_fname, nx, ny))
        update_xrmmap = getattr(self.nb.GetCurrentPage(),
                                'update_xrmmap', None)
        # cfile is only bound when _fname is in filemap, hence the re-check
        if callable(update_xrmmap) and _fname in self.filemap:
            update_xrmmap(xrmfile=cfile)
        if self.h5convert_oncomplete is not None:
            self.h5convert_oncomplete()
def message(self, msg, win=0):
    """Show *msg* in status-bar field *win*."""
    self.statusbar.SetStatusText(msg, win)
def check_ownership(self, fname):
    """
    check whether we're currently owner of the file.
    this is important!! HDF5 files can be corrupted.

    Returns True if (possibly after taking ownership with the user's
    consent) this process owns the HDF5 file.
    """
    if not self.filemap[fname].check_hostid():
        if (wx.ID_YES == Popup(self, NOT_OWNER_MSG % fname,
                               'Not Owner of HDF5 File',
                               style=wx.YES_NO)):
            self.filemap[fname].take_ownership()
    return self.filemap[fname].check_hostid()
class OpenPoniFile(wx.Dialog):
    """Modal dialog to choose an XRD calibration (.poni) file.

    After ShowModal(), callers read the file-type choice and path from
    ``self.PoniInfo``.  The OK button is enabled only while the path
    field holds an existing file.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """Build widgets and layout; OK starts disabled."""
        wx.Dialog.__init__(self, None, title='XRD Calibration File', size=(350, 280))
        panel = wx.Panel(self)
        ################################################################################
        cal_chc = ['Dioptas calibration file:', 'pyFAI calibration file:']
        # PoniInfo: [file-type choice, path text control, browse button]
        self.PoniInfo = [Choice(panel, choices=cal_chc),
                         wx.TextCtrl(panel, size=(320, 25)),
                         Button(panel, label='Browse...')]
        self.PoniInfo[2].Bind(wx.EVT_BUTTON, self.onBROWSEponi)
        # FIX: previously checkOK() was only ever called from the Browse
        # handler, so typing or pasting a valid path left OK disabled.
        # Re-validate on every text change.
        self.PoniInfo[1].Bind(wx.EVT_TEXT, self.checkOK)
        ponisizer = wx.BoxSizer(wx.VERTICAL)
        ponisizer.Add(self.PoniInfo[0], flag=wx.TOP, border=15)
        ponisizer.Add(self.PoniInfo[1], flag=wx.TOP, border=5)
        ponisizer.Add(self.PoniInfo[2], flag=wx.TOP|wx.BOTTOM, border=5)
        ################################################################################
        hlpBtn = wx.Button(panel, wx.ID_HELP)
        okBtn = wx.Button(panel, wx.ID_OK)
        canBtn = wx.Button(panel, wx.ID_CANCEL)
        minisizer = wx.BoxSizer(wx.HORIZONTAL)
        minisizer.Add(hlpBtn, flag=wx.RIGHT, border=5)
        minisizer.Add(canBtn, flag=wx.RIGHT, border=5)
        minisizer.Add(okBtn, flag=wx.RIGHT, border=5)
        ################################################################################
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add((-1, 10))
        sizer.Add(ponisizer, flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add((-1, 15))
        sizer.Add(minisizer, flag=wx.ALIGN_RIGHT, border=5)
        panel.SetSizer(sizer)
        ################################################################################
        ## Set defaults
        self.PoniInfo[0].SetSelection(0)
        self.FindWindowById(wx.ID_OK).Disable()

    def checkOK(self, event=None):
        """Enable OK only when the entered path exists."""
        if os.path.exists(self.PoniInfo[1].GetValue()):
            self.FindWindowById(wx.ID_OK).Enable()
        else:
            self.FindWindowById(wx.ID_OK).Disable()

    def onBROWSEponi(self, event=None):
        """Pick a .poni file and store its path in the text control."""
        wildcards = 'XRD calibration file (*.poni)|*.poni|All files (*.*)|*.*'
        if os.path.exists(self.PoniInfo[1].GetValue()):
            dfltDIR = self.PoniInfo[1].GetValue()
        else:
            dfltDIR = os.getcwd()
        dlg = wx.FileDialog(self, message='Select XRD calibration file',
                            defaultDir=dfltDIR,
                            wildcard=wildcards, style=wx.FD_OPEN)
        path, read = None, False
        if dlg.ShowModal() == wx.ID_OK:
            read = True
            path = dlg.GetPath().replace('\\', '/')
        dlg.Destroy()
        if read:
            self.PoniInfo[1].Clear()
            self.PoniInfo[1].SetValue(str(path))
            self.checkOK()
##################
class ROIPopUp(wx.Dialog):
    """Dialog for adding and removing map ROIs (XRF, 1DXRD, 2DXRD) on the
    owner's current map file."""
    #----------------------------------------------------------------------
    def __init__(self, owner, **kws):
        """Constructor

        Parameters
        ----------
        owner : MapViewer frame providing ``current_file`` and ``message``.
        """
        dialog = wx.Dialog.__init__(self, None, title='ROI Tools', size=(450, 500))
        panel = wx.Panel(self)
        ################################################################################
        self.owner = owner
        self.cfile = self.owner.current_file
        self.xrmmap = self.cfile.xrmmap
        self.gp = GridPanel(panel, nrows=8, ncols=4,
                            itemstyle=LEFT, gap=3, **kws)
        # "Add ROI" widgets: name, [type choice, unit choice], limit entries
        self.roi_name = wx.TextCtrl(self, -1, 'ROI_001', size=(120, -1))
        self.roi_chc = [Choice(self, size=(150, -1)),
                        Choice(self, size=(150, -1))]
        fopts = dict(minval=-1, precision=3, size=(100, -1))
        self.roi_lims = [FloatCtrl(self, value=0, **fopts),
                         FloatCtrl(self, value=-1, **fopts),
                         FloatCtrl(self, value=0, **fopts),
                         FloatCtrl(self, value=-1, **fopts)]
        self.gp.Add(SimpleText(self, ' Add new ROI: '), dcol=2, style=LEFT, newrow=True)
        self.gp.Add(SimpleText(self, ' Name:'), newrow=True)
        self.gp.Add(self.roi_name, dcol=2)
        self.gp.Add(SimpleText(self, ' Type:'), newrow=True)
        self.gp.Add(self.roi_chc[0], dcol=2)
        self.gp.Add(SimpleText(self, ' Limits:'), newrow=True)
        self.gp.AddMany((self.roi_lims[0], self.roi_lims[1], self.roi_chc[1]),
                        dcol=1, style=LEFT)
        self.gp.AddMany((SimpleText(self, ' '), self.roi_lims[2], self.roi_lims[3]),
                        dcol=1, style=LEFT, newrow=True)
        self.gp.AddMany((SimpleText(self, ' '),
                         Button(self, 'Add ROI', size=(100, -1), action=self.onCreateROI)),
                        dcol=1, style=LEFT, newrow=True)
        ###############################################################################
        # "Delete ROI" widgets: [detector choice, roi choice] + limits label
        self.rm_roi_ch = [Choice(self, size=(120, -1)),
                          Choice(self, size=(120, -1))]
        fopts = dict(minval=-1, precision=3, size=(100, -1))
        self.rm_roi_lims = SimpleText(self, '')
        self.gp.Add(SimpleText(self, 'Delete ROI: '), dcol=2, newrow=True)
        self.gp.AddMany((SimpleText(self, 'Detector:'), self.rm_roi_ch[0]), newrow=True)
        self.gp.AddMany((SimpleText(self, 'ROI:'), self.rm_roi_ch[1]), newrow=True)
        self.gp.Add(SimpleText(self, 'Limits:'), newrow=True)
        self.gp.Add(self.rm_roi_lims, dcol=3)
        self.gp.AddMany((SimpleText(self, ''), Button(self, 'Remove ROI', size=(100, -1), action=self.onRemoveROI)), newrow=True)
        self.gp.Add(SimpleText(self, ''), newrow=True)
        self.gp.AddMany((SimpleText(self, ''), SimpleText(self, ''),
                         wx.Button(self, wx.ID_OK, label='Done')), newrow=True)
        self.roi_chc[0].Bind(wx.EVT_CHOICE, self.roiUNITS)
        # second pair of limits only used for 2DXRD ROIs
        self.roi_lims[2].Disable()
        self.roi_lims[3].Disable()
        self.rm_roi_ch[1].Bind(wx.EVT_CHOICE, self.roiSELECT)
        self.gp.pack()
        self.cfile.reset_flags()
        self.roiTYPE()

    def roiTYPE(self, event=None):
        """Populate the ROI-type choices from what the map file contains."""
        roitype = []
        delroi = []
        if self.cfile.has_xrf:
            roitype += ['XRF']
        if self.cfile.has_xrd1d:
            roitype += ['1DXRD']
            delroi = ['xrd1d']
        if self.cfile.has_xrd2d:
            roitype += ['2DXRD']
        if len(roitype) < 1:
            roitype = ['']
        self.roi_chc[0].SetChoices(roitype)
        self.roiUNITS()
        if len(delroi) > 0:
            self.rm_roi_ch[0].SetChoices(delroi)
            self.setROI()

    def onRemoveROI(self, event=None):
        """Delete the selected ROI (only xrd1d deletion is supported)."""
        detname = self.rm_roi_ch[0].GetStringSelection()
        roiname = self.rm_roi_ch[1].GetStringSelection()
        if detname == 'xrd1d':
            self.cfile.del_xrd1droi(roiname)
            self.setROI()

    def setROI(self):
        """Refresh the deletable-ROI list for the selected detector."""
        detname = self.rm_roi_ch[0].GetStringSelection()
        try:
            detgrp = self.cfile.xrmmap['roimap'][detname]
        except:
            return
        limits = []
        names = detgrp.keys()
        for name in names:
            limits += [list(detgrp[name]['limits'][:])]
        if len(limits) > 0:
            # present ROIs ordered by their lower limit
            self.rm_roi_ch[1].SetChoices([x for (y, x) in sorted(zip(limits, names))])
            self.roiSELECT()

    def roiSELECT(self, event=None):
        """Show the limits (with units) of the ROI chosen for deletion."""
        detname = self.rm_roi_ch[0].GetStringSelection()
        roiname = self.rm_roi_ch[1].GetStringSelection()
        roimap = self.cfile.xrmmap['roimap']
        roi = None
        if detname in roimap:
            detroi = roimap[detname]
            if roiname in detroi:
                roi = detroi[roiname]
        if roi is None:
            return
        limits = roi['limits'][:]
        units = bytes2str(roi['limits'].attrs.get('units', ''))
        if units == '1/A':
            roistr = '[%0.2f to %0.2f %s]' % (limits[0], limits[1], units)
        else:
            roistr = '[%0.1f to %0.1f %s]' % (limits[0], limits[1], units)
        self.rm_roi_lims.SetLabel(roistr)

    def roiUNITS(self, event=None):
        """Set the unit choices (and 2D limit availability) for the ROI type."""
        choice = self.roi_chc[0].GetStringSelection()
        roiunit = ['']
        if choice == 'XRF':
            roiunit = ['eV', 'keV', 'channels']
            self.roi_lims[2].Disable()
            self.roi_lims[3].Disable()
        elif choice == '1DXRD':
            roiunit = [u'\u212B\u207B\u00B9 (q)', u'\u00B0 (2\u03B8)', u'\u212B (d)']
            self.roi_lims[2].Disable()
            self.roi_lims[3].Disable()
        elif choice == '2DXRD':
            roiunit = ['pixels']
            # 2D ROIs need the second pair of limits
            self.roi_lims[2].Enable()
            self.roi_lims[3].Enable()
        self.roi_chc[1].SetChoices(roiunit)

    def onCreateROI(self, event=None):
        """Create a new ROI of the selected type from the entered limits."""
        xtyp = self.roi_chc[0].GetStringSelection()
        xunt = self.roi_chc[1].GetStringSelection()
        xname = self.roi_name.GetValue()
        xrange = [float(lims.GetValue()) for lims in self.roi_lims]
        if xtyp != '2DXRD': xrange = xrange[:2]
        self.owner.message('Building ROI data for: %s' % xname)
        if xtyp == 'XRF':
            self.cfile.add_xrfroi(xrange, xname, unit=xunt)
        elif xtyp == '1DXRD':
            # map the displayed unicode unit back to its short name
            xrd = ['q', '2th', 'd']
            unt = xrd[self.roi_chc[1].GetSelection()]
            self.cfile.add_xrd1droi(xrange, xname, unit=unt)
        elif xtyp == '2DXRD':
            self.cfile.add_xrd2droi(xrange, xname, unit=xunt)
        self.owner.message('Added ROI: %s' % xname)
##################
class OpenMapFolder(wx.Dialog):
    """Dialog collecting options for building an HDF5 map from a raw folder:
    which detectors to include (XRF / 2DXRD / 1DXRD), beamline/run metadata,
    XRD calibration/background/mask files, and HDF5 compression settings.
    """
    #----------------------------------------------------------------------
    def __init__(self, folder):
        """Build the dialog for raw map *folder*."""
        self.folder = folder
        pref, f = os.path.split(folder)
        title = "Read XRM Map Folder: %s" % f
        wx.Dialog.__init__(self, None, title=title, size=(475, 750))
        panel = wx.Panel(self)
        ChkTtl = SimpleText(panel, label='Build map including data:')
        self.ChkBx = [Check(panel, label='XRF'),
                      Check(panel, label='2DXRD'),
                      Check(panel, label='1DXRD (requires calibration file)')]
        for chkbx in self.ChkBx:
            chkbx.Bind(wx.EVT_CHECKBOX, self.checkOK)
        cbsizer = wx.BoxSizer(wx.HORIZONTAL)
        cbsizer.Add(self.ChkBx[0])
        cbsizer.Add(self.ChkBx[1])
        cbsizer.Add(self.ChkBx[2])
        ckbxsizer = wx.BoxSizer(wx.VERTICAL)
        ckbxsizer.Add(ChkTtl, flag=wx.BOTTOM|wx.LEFT)
        ckbxsizer.Add(cbsizer)
        ################################################################################
        infoTtl = [SimpleText(panel, label='Facility'),
                   SimpleText(panel, label='Beamline'),
                   SimpleText(panel, label='Run cycle'),
                   SimpleText(panel, label='Proposal'),
                   SimpleText(panel, label='User group')]
        self.info = [wx.TextCtrl(panel, size=(100, 25)),
                     wx.TextCtrl(panel, size=(100, 25)),
                     wx.TextCtrl(panel, size=(100, 25)),
                     wx.TextCtrl(panel, size=(100, 25)),
                     wx.TextCtrl(panel, size=(320, 25))]
        infosizer0 = wx.BoxSizer(wx.HORIZONTAL)
        infosizer0.Add(infoTtl[0], flag=wx.RIGHT, border=5)
        infosizer0.Add(self.info[0], flag=wx.RIGHT, border=15)
        infosizer0.Add(infoTtl[1], flag=wx.RIGHT, border=5)
        infosizer0.Add(self.info[1], flag=wx.RIGHT, border=15)
        infosizer1 = wx.BoxSizer(wx.HORIZONTAL)
        infosizer1.Add(infoTtl[2], flag=wx.RIGHT, border=5)
        infosizer1.Add(self.info[2], flag=wx.RIGHT, border=15)
        infosizer1.Add(infoTtl[3], flag=wx.RIGHT, border=5)
        infosizer1.Add(self.info[3], flag=wx.RIGHT, border=15)
        infosizer2 = wx.BoxSizer(wx.HORIZONTAL)
        infosizer2.Add(infoTtl[4], flag=wx.RIGHT, border=5)
        infosizer2.Add(self.info[4], flag=wx.RIGHT, border=15)
        infosizer = wx.BoxSizer(wx.VERTICAL)
        infosizer.Add(infosizer0, flag=wx.TOP, border=5)
        infosizer.Add(infosizer1, flag=wx.TOP|wx.BOTTOM, border=5)
        infosizer.Add(infosizer2, flag=wx.BOTTOM, border=15)
        ################################################################################
        cal_chc = ['Dioptas calibration file:', 'pyFAI calibration file:']
        bkgd_chc = ['2DXRD background (optional):', '1DXRD background (optional):']
        cal_spn = wx.SP_VERTICAL|wx.SP_ARROW_KEYS|wx.SP_WRAP
        # XRDInfo indices: 0 cal type, 1 cal path, 2 browse, 3-4 steps,
        # 5-6 wedges, 7 bkgd type, 8 bkgd path, 9 browse, 10-11 bkgd scale,
        # 12 mask label, 13 mask path, 14 browse
        self.XRDInfo = [Choice(panel, choices=cal_chc),
                        wx.TextCtrl(panel, size=(320, 25)),
                        Button(panel, label='Browse...'),
                        SimpleText(panel, label='Steps:'),
                        wx.TextCtrl(panel, size=(80, 25)),
                        SimpleText(panel, label='Wedges:'),
                        wx.SpinCtrl(panel, style=cal_spn, size=(100, -1)),
                        Choice(panel, choices=bkgd_chc),
                        wx.TextCtrl(panel, size=(320, 25)),
                        Button(panel, label='Browse...'),
                        SimpleText(panel, label='Background scale:'),
                        wx.TextCtrl(panel, size=(80, 25)),
                        SimpleText(panel, label='2DXRD mask file (optional):'),
                        wx.TextCtrl(panel, size=(320, 25)),
                        Button(panel, label='Browse...'),]
        for i in [1, 8, 13]:
            self.XRDInfo[i+1].Bind(wx.EVT_BUTTON, partial(self.onBROWSEfile, i=i))
        xrdsizer1 = wx.BoxSizer(wx.HORIZONTAL)
        xrdsizer1.Add(self.XRDInfo[3], flag=wx.RIGHT, border=5)
        xrdsizer1.Add(self.XRDInfo[4], flag=wx.RIGHT, border=5)
        xrdsizer1.Add(self.XRDInfo[5], flag=wx.RIGHT, border=5)
        xrdsizer1.Add(self.XRDInfo[6], flag=wx.RIGHT, border=5)
        xrdsizer2 = wx.BoxSizer(wx.HORIZONTAL)
        xrdsizer2.Add(self.XRDInfo[9], flag=wx.RIGHT, border=30)
        xrdsizer2.Add(self.XRDInfo[10], flag=wx.RIGHT, border=5)
        xrdsizer2.Add(self.XRDInfo[11], flag=wx.RIGHT, border=5)
        xrdsizer = wx.BoxSizer(wx.VERTICAL)
        xrdsizer.Add(self.XRDInfo[0], flag=wx.TOP, border=5)
        xrdsizer.Add(self.XRDInfo[1], flag=wx.TOP, border=5)
        xrdsizer.Add(self.XRDInfo[2], flag=wx.TOP|wx.BOTTOM, border=5)
        xrdsizer.Add(xrdsizer1, flag=wx.BOTTOM, border=5)
        xrdsizer.Add(self.XRDInfo[7], flag=wx.TOP, border=8)
        xrdsizer.Add(self.XRDInfo[8], flag=wx.TOP, border=5)
        xrdsizer.Add(xrdsizer2, flag=wx.TOP|wx.BOTTOM, border=5)
        xrdsizer.Add(self.XRDInfo[12], flag=wx.TOP, border=8)
        xrdsizer.Add(self.XRDInfo[13], flag=wx.TOP, border=5)
        xrdsizer.Add(self.XRDInfo[14], flag=wx.TOP|wx.BOTTOM, border=5)
        ################################################################################
        h5cmpr_chc = ['gzip', 'lzf']
        h5cmpr_opt = ['%i' % i for i in np.arange(10)]
        self.H5cmprInfo = [Choice(panel, choices=h5cmpr_chc),
                           Choice(panel, choices=h5cmpr_opt)]
        # FIX: corrected user-visible typo 'Comppression'
        h5txt = SimpleText(panel, label='H5 File Compression:')
        self.H5cmprInfo[0].SetSelection(0)
        self.H5cmprInfo[1].SetSelection(2)
        self.H5cmprInfo[0].Bind(wx.EVT_CHOICE, self.onH5cmpr)
        h5cmprsizer = wx.BoxSizer(wx.HORIZONTAL)
        h5cmprsizer.Add(h5txt, flag=wx.RIGHT, border=5)
        h5cmprsizer.Add(self.H5cmprInfo[0], flag=wx.RIGHT, border=5)
        h5cmprsizer.Add(self.H5cmprInfo[1], flag=wx.RIGHT, border=5)
        ################################################################################
        self.ok_btn = wx.Button(panel, wx.ID_OK)
        self.cancel_btn = wx.Button(panel, wx.ID_CANCEL)
        minisizer = wx.BoxSizer(wx.HORIZONTAL)
        minisizer.Add(self.cancel_btn, flag=wx.RIGHT, border=5)
        minisizer.Add(self.ok_btn, flag=wx.RIGHT, border=5)
        ################################################################################
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(ckbxsizer, flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(HLine(panel, size=(320, 2)), flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(infosizer, flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(HLine(panel, size=(320, 2)), flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(xrdsizer, flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(HLine(panel, size=(320, 2)), flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(h5cmprsizer, flag=wx.TOP|wx.LEFT, border=5)
        sizer.Add(minisizer, flag=wx.ALIGN_RIGHT, border=5)
        pack(panel, sizer)
        w, h = panel.GetBestSize()
        w = 25*(2 + int(w*0.04))
        h = 25*(2 + int(h*0.04))
        panel.SetSize((w, h))
        ################################################################################
        ## Set defaults
        self.ChkBx[0].SetValue(True)
        self.ChkBx[1].SetValue(False)
        self.ChkBx[2].SetValue(False)
        self.XRDInfo[0].SetSelection(0)
        self.XRDInfo[7].SetSelection(0)
        self.XRDInfo[4].SetValue('5001')
        self.XRDInfo[6].SetValue(1)
        self.XRDInfo[6].SetRange(0, 36)
        self.XRDInfo[11].SetValue('1.0')
        for poniinfo in self.XRDInfo:
            poniinfo.Disable()
        self.info[0].SetValue(FACILITY)
        self.info[1].SetValue(BEAMLINE)
        # pre-fill run cycle / user group from the folder's Scan.ini.
        # FIX: guard empty lines (the original 'line.split()[0]' raised
        # IndexError on blank lines) and close the file when done.
        with open(os.path.join(self.folder, 'Scan.ini'), 'r') as fh:
            for line in fh:
                words = line.split()
                if words and words[0] == 'basedir':
                    npath = words[-1].replace('\\', '/').split('/')
                    cycle, usr = npath[-2], npath[-1]
                    self.info[2].SetValue(cycle)
                    self.info[4].SetValue(usr)
        self.checkOK()

    def checkOK(self, evt=None):
        """Enable exactly the XRD widgets needed for the checked detectors."""
        if self.ChkBx[2].GetValue():
            # 1DXRD needs the full calibration + background + mask set
            for poniinfo in self.XRDInfo:
                poniinfo.Enable()
        elif self.ChkBx[1].GetValue():
            # 2DXRD only needs the background/mask widgets
            for poniinfo in self.XRDInfo[8:]:
                poniinfo.Enable()
            for poniinfo in self.XRDInfo[:8]:
                poniinfo.Disable()
            self.XRDInfo[7].SetSelection(0)
        else:
            for poniinfo in self.XRDInfo:
                poniinfo.Disable()

    def onH5cmpr(self, event=None):
        """gzip has levels 0-9; lzf has no level option."""
        if self.H5cmprInfo[0].GetSelection() == 0:
            self.H5cmprInfo[1].Enable()
            self.H5cmprInfo[1].SetChoices(['%i' % i for i in np.arange(10)])
            self.H5cmprInfo[1].SetSelection(2)
        else:
            self.H5cmprInfo[1].Disable()
            self.H5cmprInfo[1].SetChoices([''])

    def onBROWSEfile(self, event=None, i=1):
        """Browse for the file belonging to ``XRDInfo[i]`` (i in {1, 8, 13}).

        BUG FIX: the original used 'if i == 8: ... if i == 13: ... else: ...'
        so the i == 8 (2D background) wildcard was always overwritten by the
        else branch; the second test must be an elif.
        """
        if i == 8:
            wldcd = '2D XRD background file (*.tiff)|*.tif;*.tiff;*.edf|All files (*.*)|*.*'
        elif i == 13:
            wldcd = '1D XRD background file (*.xy)|*.xy|All files (*.*)|*.*'
        else:  ## i == 1
            wldcd = 'XRD calibration file (*.poni)|*.poni|All files (*.*)|*.*'
        if os.path.exists(self.XRDInfo[i].GetValue()):
            dfltDIR = self.XRDInfo[i].GetValue()
        else:
            dfltDIR = os.getcwd()
        dlg = wx.FileDialog(self, message='Select %s' % wldcd.split(' (')[0],
                            defaultDir=dfltDIR,
                            wildcard=wldcd, style=wx.FD_OPEN)
        path, read = None, False
        if dlg.ShowModal() == wx.ID_OK:
            read = True
            path = dlg.GetPath().replace('\\', '/')
        dlg.Destroy()
        if read:
            self.XRDInfo[i].Clear()
            self.XRDInfo[i].SetValue(str(path))
class MapViewer(LarchWxApp):
    """wx application wrapper that launches a MapViewerFrame."""

    def __init__(self, use_scandb=False, _larch=None, filename=None,
                 version_info=None, with_inspect=False, **kws):
        self.filename = filename
        self.use_scandb = use_scandb
        LarchWxApp.__init__(self, _larch=_larch, version_info=version_info,
                            with_inspect=with_inspect, **kws)

    def createApp(self):
        """Create the main viewer frame and install it as the top window."""
        main_frame = MapViewerFrame(use_scandb=self.use_scandb,
                                    filename=self.filename,
                                    version_info=self.version_info,
                                    _larch=self._larch)
        self.SetTopWindow(main_frame)
        return True
def mapviewer(use_scandb=False, filename=None, _larch=None,
              with_inspect=False, **kws):
    """Launch the GSE XRM MapViewer application."""
    MapViewer(filename=filename, use_scandb=use_scandb, _larch=_larch,
              with_inspect=with_inspect, **kws)
|
main.py | import shutil
import os
import sys
from threading import Thread
from configparser import RawConfigParser
import time
# from win32api import MessageBox
# Flag polled by the copier thread; the main block sets it non-zero to stop.
# NOTE: the original also had a module-level `global out` statement, which
# is a no-op at module scope and has been removed.
out = 0
def createBatchFile(source, destination, isfile=True):
    """Poll *source* every 2 seconds and copy changed file(s) to *destination*.

    Runs until the module-level flag ``out`` becomes non-zero.

    Parameters
    ----------
    source : str
        File (isfile=True) or directory (isfile=False) to watch.
    destination : str
        Target file path (file mode) or target directory (directory mode).
    isfile : bool
        Whether *source* is a single file.

    BUG FIX: the original directory branch reused a single ``cached_stamp``
    for every file, overwriting it on each comparison, so only one file's
    mtime was effectively tracked and files were re-copied or missed.  A
    per-path dict of mtimes is kept instead.  ``os.stat`` is also guarded,
    since a listed file can disappear before it is stat'ed.
    """
    cached_stamps = {}
    while out == 0:
        if isfile:
            paths = [source]
        else:
            paths = [os.path.join(source, name) for name in os.listdir(source)]
        for src in paths:
            try:
                stamp = os.stat(src).st_mtime
            except OSError:
                continue  # file removed between listdir and stat
            if cached_stamps.get(src) != stamp:
                cached_stamps[src] = stamp
                if isfile:
                    dst = destination
                else:
                    dst = os.path.join(destination, os.path.basename(src))
                try:
                    shutil.copy(src, dst)
                except Exception as e:
                    print(e)
        time.sleep(2)
if __name__ == '__main__':
    try:
        cfg_path = os.path.join(os.getcwd(), 'config.ini')
        if os.path.isfile(cfg_path):
            config = RawConfigParser()
            config.read('config.ini')
            source = config.get('APP', 'source')
            destination = config.get('APP', 'destination')
            if not os.path.exists(source):
                print(f"{source} Path not found")
                # MessageBox(0 , f"{source} Path not found" , "Error" ,0x00000010 )
                sys.exit()
            if not os.path.exists(destination):
                os.mkdir(destination)
            isfile = os.path.isfile(source)
            # FIX: run the copier as a daemon thread so sys.exit() on error
            # paths (or a non-integer input, which raises ValueError) does
            # not leave a non-daemon thread keeping the process alive.
            t = Thread(target=createBatchFile,
                       args=(source, destination, isfile), daemon=True)
            t.start()
            # any non-zero value stops the copier loop
            out = int(input("Press 1 to exit : "))
        else:
            # create a template config and ask the user to fill it in
            with open(cfg_path, 'w') as f:
                f.write("[APP]\nsource=\ndestination=")
            # MessageBox(0 , "Kindly provide the source and destination path in config.ini file" , "Error" ,0x00000010 )
            sys.exit()
    except Exception as e:
        print(e)
        # MessageBox(0 , repr(e) , "Error" ,0x00000010 )
        sys.exit()
test_cig_multiplayer.py | # Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
from gym_vizdoom import (LIST_OF_ENVS, EXPLORATION_GOAL_FRAME, GOAL_REACHING_REWARD)
import vizdoomgym
from multiprocessing import Process
from threading import Thread
import time
def run_agent(a_id):
    """Run a random-action agent with id *a_id* in the shared Vizdoom
    multiplayer match (3 agents total), printing steps and rewards."""
    print(f"making {a_id}")
    env = gym.make("VizdoomCig-v0", agent_id=a_id, agents_total=3, port=5039)
    env.imitation = True
    policy = lambda env, obs: env.action_space.sample()
    for epoch in range(5):
        print("New epoch")
        step_count = 0
        obs = env.reset()
        while True:
            step_count += 1
            print(step_count)
            time.sleep(0.05)
            # the original branched on a_id here, but both branches
            # performed the identical step() call
            obs, reward, done, info = env.step(policy(env, obs))
            if reward != 0:
                print(reward)
agents = []
# BUG FIX: Process args must be a tuple; args=(str(0)) is just the string
# "0", which only worked by accident because Process unpacks the string's
# characters (and would break for ids of two or more digits).
host = Process(target=run_agent, args=(str(0),))
host.start()
player2 = Process(target=run_agent, args=(str(1),))
player2.start()
player3 = Process(target=run_agent, args=(str(2),))
player3.start()
# run_agent(0)
input()  # keep the parent alive until the user presses Enter
manager.py | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
def manager_init():
    """One-time startup configuration: sync clock, seed default params,
    record version info, register the device, and initialize crash
    reporting.  Raises if registration fails or Passive is unset."""
    # update system time from panda
    set_time(cloudlog)
    params = Params()
    params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
    default_params = [
        ("CompletedTrainingVersion", "0"),
        ("HasAcceptedTerms", "0"),
        ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
        ("OpenpilotEnabledToggle", "1"),
        ("IsMetric", "1"),
        # HKG
        ("UseClusterSpeed", "1"),
        ("LongControlEnabled", "0"),
        ("MadModeEnabled", "1"),
        ("AutoLaneChangeEnabled", "0"),
        ("SccSmootherSlowOnCurves", "0"),
        ("SccSmootherSyncGasPressed", "0"),
        ("FuseWithStockScc", "0"),
        ("ShowDebugUI", "0")
    ]
    if TICI:
        default_params.append(("IsUploadRawEnabled", "1"))
    if params.get_bool("RecordFrontLock"):
        params.put_bool("RecordFront", True)
    # set unset params (defaults never overwrite existing values)
    for k, v in default_params:
        if params.get(k) is None:
            params.put(k, v)
    # is this dashcam?
    if os.getenv("PASSIVE") is not None:
        params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")
    os.umask(0)  # Make sure we can create files with 777 permissions
    # Create folders needed for msgq
    try:
        os.mkdir("/dev/shm")
    except FileExistsError:
        pass
    except PermissionError:
        print("WARNING: failed to make /dev/shm")
    # set version params
    params.put("Version", version)
    params.put("TermsVersion", terms_version)
    params.put("TrainingVersion", training_version)
    params.put("GitCommit", get_git_commit(default=""))
    params.put("GitBranch", get_git_branch(default=""))
    params.put("GitRemote", get_git_remote(default=""))
    # set dongle id
    reg_res = register(show_spinner=True)
    if reg_res:
        dongle_id = reg_res
    else:
        serial = params.get("HardwareSerial")
        raise Exception(f"Registration failed for device {serial}")
    os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog
    if not dirty:
        os.environ['CLEAN'] = '1'
    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
                         device=HARDWARE.get_device_type())
    # only report crashes from comma-remote builds on-device
    if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
        crash.init()
        crash.bind_user(id=dongle_id)
        crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
                         device=HARDWARE.get_device_type())
def manager_prepare():
    """Run the prepare step of every managed process."""
    for proc in managed_processes.values():
        proc.prepare()
def manager_cleanup():
    """Stop every managed process, then log that shutdown is complete."""
    for proc in managed_processes.values():
        proc.stop()
    cloudlog.info("everything is dead")
def manager_thread():
    """Main supervision loop: start managed processes and keep them in the
    desired running state based on device state, publishing managerState
    each cycle.  Returns when DoUninstall is requested."""
    Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
    system("am startservice com.neokii.optool/.MainService")
    system("am startservice com.neokii.openpilot/.MainService")
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})
    # save boot log
    #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
    params = Params()
    # processes that should never be (re)started this session
    ignore = []
    if params.get("DongleId") == UNREGISTERED_DONGLE_ID:
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    if os.getenv("BLOCK") is not None:
        ignore += os.getenv("BLOCK").split(",")
    ensure_running(managed_processes.values(), started=False, not_run=ignore)
    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])
    while True:
        sm.update()
        not_run = ignore[:]
        if sm['deviceState'].freeSpacePercent < 5:
            # avoid filling the disk: stop logging when nearly full
            not_run.append("loggerd")
        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview, not_run)
        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)
        started_prev = started
        # green for alive processes, red for dead ones
        running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                        for p in managed_processes.values() if p.proc]
        cloudlog.debug(' '.join(running_list))
        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
        pm.send('managerState', msg)
        # TODO: let UI handle this
        # Exit main loop when uninstall is needed
        if params.get_bool("DoUninstall"):
            break
def main():
  """Entry point: initialize, prepare processes, run the manager loop,
  then clean up (optionally uninstalling) on exit."""
  prepare_only = os.getenv("PREPAREONLY") is not None

  manager_init()

  # Start UI early so prepare can happen in the background
  if not prepare_only:
    managed_processes['ui'].start()

  manager_prepare()

  if prepare_only:
    return

  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

  try:
    manager_thread()
  except Exception:
    traceback.print_exc()
    crash.capture_exception()
  finally:
    # always stop the managed processes, even after a crash
    manager_cleanup()

  # the uninstall flag may have been set while the manager loop was running
  if Params().get_bool("DoUninstall"):
    cloudlog.warning("uninstalling")
    HARDWARE.uninstall()
if __name__ == "__main__":
  unblock_stdout()

  try:
    main()
  except Exception:
    # startup failure: persist the log and show the traceback on screen
    add_file_handler(cloudlog)
    cloudlog.exception("Manager failed to start")

    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error

    with TextWindow(error) as t:
      t.wait_for_exit()

    raise

  # manual exit because we are forked
  sys.exit(0)
|
multitester.py | """
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
from botocore.exceptions import ClientError
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
                    help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
                    help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
                    help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
                    default='test_letsencrypt_auto_certonly_standalone.sh',
                    help='path of bash script in to deploy and run')
#parser.add_argument('--script_args',
#                    nargs='+',
#                    help='space-delimited list of arguments to pass to the bash test script',
#                    required=False)
parser.add_argument('--repo',
                    default='https://github.com/letsencrypt/letsencrypt.git',
                    help='certbot git repo to use')
parser.add_argument('--branch',
                    default='~',
                    help='certbot git branch to trial')
parser.add_argument('--pull_request',
                    default='~',
                    help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
                    action='store_true',
                    help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
                    action='store_true',
                    help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
                    default='',
                    help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
                    action='store_true',
                    help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
                    action='store_true',
                    help="only make a boulder server")
parser.add_argument('--fast',
                    action='store_true',
                    help="use larger instance types to run faster (saves about a minute, probably not worth it)")
# parsed at import time: this module is a script, not a library
cl_args = parser.parse_args()

# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile

# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1
LOGDIR = "" #points to logging / working directory, reassigned below
# boto3/AWS api globals, initialized once credentials are loaded
AWS_SESSION = None
EC2 = None
def make_security_group():
    """Create the 'letsencrypt_test' security group and open the ports
    the tests need: ssh (22), http (80), https (443), the boulder wfe
    (4000), and the mosh udp range.

    Fails if a group named 'letsencrypt_test' already exists, since EC2
    does not allow duplicate group names.
    """
    group = EC2.create_security_group(GroupName="letsencrypt_test",
                                      Description='security group for automated testing')
    for port in (22, 80, 443, 4000):
        group.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0",
                                FromPort=port, ToPort=port)
    # mosh needs a range of udp ports
    group.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0",
                            FromPort=60000, ToPort=61000)
    return group
def make_instance(instance_name,
                  ami_id,
                  keyname,
                  machine_type='t2.micro',
                  security_groups=None,
                  userdata=""): #userdata contains bash or cloud-init script
    """Launch and tag a single EC2 instance.

    :param instance_name: value for the instance's Name tag
    :param ami_id: AMI to boot
    :param keyname: AWS key pair name to install
    :param machine_type: EC2 instance type
    :param security_groups: list of security group names; defaults to
        ['letsencrypt_test'] (None sentinel avoids a shared mutable default)
    :param userdata: bash or cloud-init script passed to the instance
    :return: boto3 Instance object
    """
    if security_groups is None:
        security_groups = ['letsencrypt_test']
    new_instance = EC2.create_instances(
        ImageId=ami_id,
        SecurityGroups=security_groups,
        KeyName=keyname,
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type)[0]

    # brief pause to prevent rare error on EC2 delay, should block until ready instead
    time.sleep(1.0)

    # give instance a name
    try:
        new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
    except ClientError as e:
        if "InvalidInstanceID.NotFound" in str(e):
            # This seems to be ephemeral... retry once
            time.sleep(1)
            new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
        else:
            raise
    return new_instance
def terminate_and_clean(instances):
    """
    Some AMIs specify EBS stores that won't delete on instance termination.
    These must be manually deleted after shutdown.
    """
    # collect the volumes that will outlive their instances
    leftover_volumes = []
    for inst in instances:
        for mapping in inst.block_device_mappings:
            ebs = mapping.get('Ebs')
            if ebs is not None and not ebs['DeleteOnTermination']:
                leftover_volumes.append(ebs['VolumeId'])

    for inst in instances:
        inst.terminate()

    # can't delete volumes until all attaching instances are terminated
    pending_ids = [inst.id for inst in instances]
    done = False
    while not done:
        # reinstantiate each Instance so boto3 reports the true state
        statuses = [EC2.Instance(id=_id).state['Name'] for _id in pending_ids]
        done = all(status == 'terminated' for status in statuses)
        time.sleep(5)

    for vol_id in leftover_volumes:
        EC2.Volume(id=vol_id).delete()
    return leftover_volumes
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    """Block until the server at urlstring responds to http requests.

    Polls every wait_time seconds and gives up after timeout seconds.
    Returns as soon as a request succeeds instead of sleeping one extra
    wait_time after the server is already up.
    """
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        try:
            sys.stdout.write('.')
            sys.stdout.flush()
            req = urllib2.Request(urlstring)
            urllib2.urlopen(req)
            server_ready = True
        except urllib2.URLError:
            # not up yet; wait and retry
            time.sleep(wait_time)
            t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
    """Block until the server at ipstring accepts connections on port 22.

    Polls every wait_time seconds and gives up after timeout seconds.
    Each probe socket is closed in a finally block so failed attempts
    don't leak file descriptors (the original only closed the last one).
    """
    reached = False
    t_elapsed = 0
    while not reached and t_elapsed < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ipstring, 22))
            reached = True
        except socket.error:
            time.sleep(wait_time)
            t_elapsed += wait_time
        finally:
            # close every probe socket, not just the final one
            sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
    "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
    instance_id = booting_instance.id

    def _refresh():
        # boto3 only updates 'state' when the object is rebuilt from its id
        fresh = EC2.Instance(id=instance_id)
        return fresh, fresh.state['Name'], fresh.public_ip_address

    instance, state, ip_addr = _refresh()
    while state != 'running' or ip_addr is None:
        time.sleep(wait_time)
        instance, state, ip_addr = _refresh()
    block_until_ssh_open(ip_addr)
    time.sleep(extra_wait_time)
    return instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
    "clones master of repo_url"
    # runs on the local machine; result is le.tar.gz inside LOGDIR
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt'% repo_url)
        local('tar czf le.tar.gz letsencrypt')

def local_git_branch(repo_url, branch_name):
    "clones branch <branch_name> of repo_url"
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
        local('tar czf le.tar.gz letsencrypt')

def local_git_PR(repo_url, PRnumstr, merge_master=True):
    "clones specified pull request from repo_url and optionally merges into master"
    with lcd(LOGDIR):
        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
        local('git clone %s letsencrypt'% repo_url)
        local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
        local('cd letsencrypt && git checkout lePRtest')
        if merge_master:
            local('cd letsencrypt && git remote update origin')
            local('cd letsencrypt && git merge origin/master -m "testmerge"')
        local('tar czf le.tar.gz letsencrypt')

def local_repo_to_remote():
    "copies local tarball of repo to remote"
    with lcd(LOGDIR):
        put(local_path='le.tar.gz', remote_path='')
        run('tar xzf le.tar.gz')

def local_repo_clean():
    "delete tarball"
    with lcd(LOGDIR):
        local('rm le.tar.gz')

def deploy_script(scriptpath, *args):
    "copies to remote and executes local script"
    #with lcd('scripts'):
    put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
    scriptfile = os.path.split(scriptpath)[1]
    args_str = ' '.join(args)
    run('./'+scriptfile+' '+args_str)

def run_boulder():
    # runs over ssh on the boulder server; start.py is backgrounded
    with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
        run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost')
        run('nohup ./start.py >& /dev/null < /dev/null &')

def config_and_launch_boulder(instance):
    # deploy the config script first, then launch boulder
    execute(deploy_script, 'scripts/boulder_config.sh')
    execute(run_boulder)

def install_and_launch_certbot(instance, boulder_url, target):
    # exports instance/network details into the remote test script's environment
    execute(local_repo_to_remote)
    with shell_env(BOULDER_URL=boulder_url,
                   PUBLIC_IP=instance.public_ip_address,
                   PRIVATE_IP=instance.private_ip_address,
                   PUBLIC_HOSTNAME=instance.public_dns_name,
                   PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
                   OS_TYPE=target['type']):
        execute(deploy_script, cl_args.test_script)

def grab_certbot_log():
    "grabs letsencrypt.log via cat into logged stdout"
    sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
    cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
    # fallback file if /var/log is unwriteable...? correct?
    sudo('if [ -f ./certbot.log ]; then \
    cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instances(targetlist):
    "Create a fleet of client instances"
    created = []
    print("Creating instances: ", end="")
    for target in targetlist:
        hvm = target['virt'] == 'hvm'
        if cl_args.fast:
            # larger machine types shave roughly a minute off the run
            machine_type = 't2.medium' if hvm else 'c1.medium'
        else:
            # 32 bit systems need the t1/c1 families
            machine_type = 't2.micro' if hvm else 't1.micro'

        userdata = target.get('userdata', '')
        name = 'le-%s'%target['name']
        print(name, end=" ")
        created.append(make_instance(name,
                                     target['ami'],
                                     KEYNAME,
                                     machine_type=machine_type,
                                     userdata=userdata))
    print()
    return created
def test_client_process(inqueue, outqueue):
    """Worker process body: provision and test one target at a time.

    Consumes (index, target) items from inqueue until SENTINEL is seen,
    runs the certbot test script on the matching EC2 instance, and puts
    (index, target, 'pass'|'fail') onto outqueue for the summary.
    """
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, target = inreq

        # save all stdout of this worker into a per-machine log file
        sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')

        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
        instances[ii] = block_until_instance_ready(instances[ii])
        print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
        env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
        print(env.host_string)

        try:
            install_and_launch_certbot(instances[ii], boulder_url, target)
            outqueue.put((ii, target, 'pass'))
            print("%s - %s SUCCESS"%(target['ami'], target['name']))
        except Exception:
            # deliberately broad: any failure marks this target failed,
            # but no longer swallows SystemExit/KeyboardInterrupt
            outqueue.put((ii, target, 'fail'))
            print("%s - %s FAIL"%(target['ami'], target['name']))

        # append server certbot.log to each per-machine output log
        print("\n\ncertbot.log\n" + "-"*80 + "\n")
        try:
            execute(grab_certbot_log)
        except Exception:
            print("log fail\n")
def cleanup(cl_args, instances, targetlist):
    """Tear down (or report how to reach) the EC2 fleet after a run."""
    print('Logs in ', LOGDIR)
    if cl_args.saveinstances:
        # boxes stay up for debugging; print login information for each
        for idx, target in enumerate(targetlist):
            login = "%s@%s"%(target['user'], instances[idx].public_ip_address)
            print(target['name'], target['ami'], login)
    else:
        print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes')
        if cl_args.killboulder:
            boulder_server.terminate()
        terminate_and_clean(instances)
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10

# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
    pass
env['abort_exception'] = FabricException

# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)

# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
    if cl_args.pull_request != '~':
        print('Testing PR %s '%cl_args.pull_request,
              "MERGING into master" if cl_args.merge_master else "")
        execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
    elif cl_args.branch != '~':
        print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
        execute(local_git_branch, cl_args.repo, cl_args.branch)
    else:
        print('Testing master of %s'%cl_args.repo)
        execute(local_git_clone, cl_args.repo)
except FabricException:
    print("FAIL: trouble with git repo")
    exit()

# Set up EC2 instances
#-------------------------------------------------------------------------------
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# python tags; acceptable only because the config file is trusted local input.
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
    print(target['ami'], target['name'])

print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')

print("Making Security Group")
# create the shared security group only if it doesn't already exist
sg_exists = False
for sg in EC2.security_groups.all():
    if sg.group_name == 'letsencrypt_test':
        sg_exists = True
        print(" %s already exists"%'letsencrypt_test')
if not sg_exists:
    make_security_group()
    time.sleep(30)

# reuse a running boulder server if one is tagged 'le-boulderserver'
boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
    {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
    {'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)

print("Requesting Instances...")
if boulder_server:
    print("Found existing boulder server:", boulder_server)
    boulder_preexists = True
else:
    print("Can't find a boulder server, starting one...")
    boulder_server = make_instance('le-boulderserver',
                                   BOULDER_AMI,
                                   KEYNAME,
                                   machine_type='t2.micro',
                                   #machine_type='t2.medium',
                                   security_groups=['letsencrypt_test'])
# Main driver: bring up boulder plus the client fleet, fan the test script
# out to every client in parallel, then summarize and clean up.
try:
    if not cl_args.boulderonly:
        instances = create_client_instances(targetlist)

    # Configure and launch boulder server
    #-------------------------------------------------------------------------------
    print("Waiting on Boulder Server")
    boulder_server = block_until_instance_ready(boulder_server)
    print(" server %s"%boulder_server)

    # env.host_string defines the ssh user and host for connection
    env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
    print("Boulder Server at (SSH):", env.host_string)
    if not boulder_preexists:
        print("Configuring and Launching Boulder")
        config_and_launch_boulder(boulder_server)
        # blocking often unnecessary, but cheap EC2 VMs can get very slow
        block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
                               wait_time=10, timeout=500)

    # clients reach boulder over the VPC-internal address
    boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
    print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
    print("Boulder Server at (EC2 private ip): %s"%boulder_url)

    if cl_args.boulderonly:
        sys.exit(0)

    # Install and launch client scripts in parallel
    #-------------------------------------------------------------------------------
    print("Uploading and running test script in parallel: %s"%cl_args.test_script)
    print("Output routed to log files in %s"%LOGDIR)
    # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
    # the latter has implementation flaws that deadlock it in some circumstances)
    manager = Manager()
    outqueue = manager.Queue()
    inqueue = manager.Queue()
    SENTINEL = None #queue kill signal

    # launch as many processes as clients to test
    num_processes = len(targetlist)
    jobs = [] #keep a reference to current procs

    # initiate process execution
    for i in range(num_processes):
        p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
        jobs.append(p)
        p.daemon = True  # kills subprocesses if parent is killed
        p.start()

    # fill up work queue
    for ii, target in enumerate(targetlist):
        inqueue.put((ii, target))

    # add SENTINELs to end client processes
    for i in range(num_processes):
        inqueue.put(SENTINEL)
    # wait on termination of client processes
    for p in jobs:
        p.join()
    # add SENTINEL to output queue
    outqueue.put(SENTINEL)

    # clean up
    execute(local_repo_clean)

    # print and save summary results
    results_file = open(LOGDIR+'/results', 'w')
    outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
    outputs.sort(key=lambda x: x[0])
    for outq in outputs:
        ii, target, status = outq
        print('%d %s %s'%(ii, target['name'], status))
        results_file.write('%d %s %s\n'%(ii, target['name'], status))
    results_file.close()

finally:
    cleanup(cl_args, instances, targetlist)

    # kill any connections
    fabric.network.disconnect_all()
|
sendmail2.py | from flask import Flask,render_template,render_template_string
from flask_script import Manager
from flask_mail import Mail,Message
import os
from threading import Thread #导入线程
app = Flask(__name__)
# SMTP settings come from the environment, with development defaults
app.config['MAIL_SERVER'] = os.environ.get('MAIL_SERVER','smtp.1000phone.com')
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME','xialigang@1000phone.com')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD','123456')
mail = Mail(app)
manager = Manager(app)
def async_send_mail(msg):
    """Send `msg` from a worker thread.

    Runs outside a request, so an application context must be pushed
    manually before flask-mail can read the app configuration.
    """
    # push an application context
    with app.app_context():
        mail.send(message=msg)  # send the mail
@app.route('/send_mail/')
def send_mail():
    """Send an activation mail from a background thread so the HTTP
    request can return immediately instead of blocking on SMTP."""
    msg = Message(subject='邮件激活',recipients=['793390457@qq.com'],sender=app.config['MAIL_USERNAME'])
    msg.html = render_template_string('<h1>你好 请点击右侧链接 进行账户的激活 <a href="http://www.baidu.com">激活</a></h1>')
    thr = Thread(target=async_send_mail,args=(msg,))
    thr.start()  # start the background send thread
    return render_template_string('发送邮件')

if __name__ == '__main__':
    manager.run()
|
elastic_cvr_extract.py | from elasticsearch import Elasticsearch
# import elasticsearch_dsl
from elasticsearch_dsl import Search
from collections import namedtuple
import datetime
import ujson as json
import os
import pytz
import tqdm
import logging
import threading
from .field_parser import utc_transform
from . import config, create_session, engine, setup_database_connection
from . import alchemy_tables
from .bug_report import add_error
from . import data_scanner
from .cvr_download import download_all_dicts_to_file
from multiprocessing.pool import Pool
import multiprocessing
import time
import sys
def update_all_mp(workers=1):
    """Run a full CVR update with one producer and `workers` consumers.

    The producer fills a bounded queue with update work; each consumer
    process drains it until it receives the sentinel value.

    :param workers: int, number of consumer processes to spawn
    """
    # https://docs.python.org/3/howto/logging-cookbook.html
    lock = multiprocessing.Lock()
    queue_size = 20000
    queue = multiprocessing.Queue(maxsize=queue_size)
    prod = multiprocessing.Process(target=cvr_update_producer, args=(queue, lock))
    prod.start()
    consumers = [multiprocessing.Process(target=cvr_update_consumer, args=(queue, lock))
                 for _ in range(workers)]
    for c in consumers:
        c.daemon = True
        c.start()
    try:
        prod.join()
        print('Producer done', 'adding sentinels')
        with lock:
            print('Producer Done - Adding Sentinels')
    except Exception as e:
        with lock:
            print('Something wrong in waiting for producer')
            print('Exception:', e)
    # one sentinel per consumer so every consumer terminates
    for i in range(workers):
        print('Adding sentinel', i)
        queue.put(CvrConnection.cvr_sentinel)
    for c in consumers:
        print('waiting for consumers', c)
        c.join()
    print('all consumers done')
    queue.close()
def create_elastic_connection(url, authentication, timeout=60, max_retries=10, retry=True):
    """Build an Elasticsearch client with retry-friendly defaults."""
    client = Elasticsearch(
        url,
        http_auth=authentication,
        timeout=timeout,
        max_retries=max_retries,
        retry_on_timeout=retry,
        http_compress=True,
    )
    return client
class CvrConnection(object):
    """ Class for connecting and retrieving data from danish CVR register """
    # placeholder timestamp used when a unit has no known update time
    dummy_date = datetime.datetime(year=1001, month=1, day=1, tzinfo=pytz.utc)
    # maps short unit-type names to the keys used in elasticsearch documents
    source_keymap = {'virksomhed': 'Vrvirksomhed',
                     'deltager': 'Vrdeltagerperson',
                     'produktionsenhed': 'VrproduktionsEnhed'}
    # per-unit version info: (samtid, sidstopdateret)
    update_info = namedtuple('update_info', ['samtid', 'sidstopdateret'])
    # sentinel value used to terminate consumer processes
    cvr_sentinel = 'CVR_SENTINEL'
def __init__(self, update_address=False):
    """ Setup everything needed for elasticsearch
    connection to Danish Business Authority for CVR data extraction
    consider moving elastic search connection into __init__

    Args:
    -----
    :param update_address: bool,
        determine if parse and insert address as well (slows it down)
    """
    # endpoint and index names of the official CVR distribution service
    self.url = 'http://distribution.virk.dk:80'
    self.index = 'cvr-permanent'
    self.company_type = 'virksomhed'
    self.penhed_type = 'produktionsenhed'
    self.person_type = 'deltager'
    # credentials for the Danish Business Authority come from project config
    self.user = config['cvr_user']
    self.password = config['cvr_passwd']
    # self.datapath = config['data_path']
    self.update_batch_size = 64
    self.update_address = update_address
    self.address_parser_factory = data_scanner.AddressParserFactory()
    # self.ElasticParams = [self.url, (self.user, self.password), 60, 10, True]
    # NOTE: connects (and prints cluster info) immediately on construction
    self.elastic_client = create_elastic_connection(self.url, (self.user, self.password))
    print('Elastic Search Client:', self.elastic_client.info())
    self.elastic_search_scan_size = 128
    self.elastic_search_scroll_time = u'20m'
    # max number of updates to download without scan scroll
    self.max_download_size = 200000
    self.update_list = namedtuple('update_list',
                                  ['enhedsnummer', 'sidstopdateret'])
    self.dummy_date = datetime.datetime(year=1001,
                                        month=1,
                                        day=1,
                                        tzinfo=pytz.utc)
    # self.data_file = os.path.join(self.datapath, 'cvr_all.json')
def search_field_val(self, field, value, size=10):
    """Return up to `size` raw hits where `field` matches `value`."""
    query = Search(using=self.elastic_client, index=self.index)
    query = query.query('match', **{field: value}).extra(size=size)
    result = query.execute()
    return result.hits.hits
def get_entity(self, enh):
    """Get CVR info for the given units.

    :param enh: list of CVR ids (enhedsnummer)
    :return: list of raw elasticsearch hits
    """
    id_query = Search(using=self.elastic_client, index=self.index)
    id_query = id_query.query('ids', values=enh).extra(size=len(enh))
    result = id_query.execute()
    return result.hits.hits
def get_pnummer(self, pnummer):
    """Get CVR info for the given production unit.

    :param pnummer: id of the production unit
    :return: list of raw elasticsearch hits
    """
    query = Search(using=self.elastic_client, index=self.index)
    query = query.query('match', _type=self.penhed_type)
    query = query.query('match', **{'VrproduktionsEnhed.pNummer': pnummer})
    result = query.execute()
    return result.hits.hits
def get_cvrnummer(self, cvrnummer):
    """Get CVR info for the company with the given cvr id.

    :param cvrnummer: int, cvrnumber of company
    :return: dict, data for company
    """
    query = Search(using=self.elastic_client, index=self.index)
    query = query.query('match', **{'Vrvirksomhed.cvrNummer': cvrnummer})
    result = query.execute()
    return result.hits.hits
def update_all(self):
    """Update CVR company data.

    Delegates to the producer/consumer pipeline in update_all_mp.

    Fix: this was declared ``@staticmethod`` while still taking ``self``,
    so ``instance.update_all()`` raised TypeError; it is now a regular
    method (``CvrConnection.update_all(conn)`` still works as before).
    """
    update_all_mp(3)
def download_all_data_to_file(self, filename):
    """Dump every document in the index to `filename`.

    Uses the scan/scroll API so the full index can be streamed without
    loading it into memory.
    """
    scan_params = {'scroll': self.elastic_search_scroll_time,
                   'size': self.elastic_search_scan_size}
    full_scan = Search(using=self.elastic_client, index=self.index)
    full_scan = full_scan.query('match_all')
    full_scan = full_scan.params(**scan_params)
    download_all_dicts_to_file(filename, full_scan)
def download_all_dicts_to_file_from_update_info(self, update_info):
    """ DEPRECATED
    :param update_info: update_info, dict with update info for each unit type
    :return:
    str: filename, datetime: download time, bool: new download or use old file
    """
    print('Download Data Write to File - DEPRECATED')
    # NOTE: hard-coded output path; any previous dump is discarded
    filename = os.path.join('/data/cvr_update.json')
    if os.path.exists(filename):
        "filename exists {0} overwriting".format(filename)
        os.remove(filename)
    print('Download updates to file name: {0}'.format(filename))
    params = {'scroll': self.elastic_search_scroll_time, 'size': self.elastic_search_scan_size}
    for (_, _type) in self.source_keymap.items():
        print('Downloading Type {0}'.format(_type))
        search = Search(using=self.elastic_client, index=self.index)
        if len(update_info[_type]['units']) < self.max_download_size:
            # few enough units to request them directly by id
            print('Few to update: {0}\nGet in match query:'.format(len(update_info[_type]['units'])))
            units = [x[0] for x in update_info[_type]['units']]
            search = search.query('ids', values=units)
        else:
            # fall back to a range query on the update timestamp,
            # which over-fetches but avoids an oversized ids query
            print('To many for match query... - Get a lot of stuff we do not need')
            search = search.query('range',
                                  **{'{0}.sidstOpdateret'.format(_type):
                                     {'gte': update_info[_type]['sidstopdateret']}})
        # search = search.query('match', _type=cvr_type)
        # search = search.query('match',
        search = search.params(**params)
        # append each unit type to the same dump file
        download_all_dicts_to_file(filename, search, mode='a')
        print('{0} handled:'.format(_type))
    return filename
def update_units(self, enh):
    """Force download and update of the given units.

    Documents are batched per unit type and flushed whenever a batch
    reaches update_batch_size; remaining partial batches are flushed
    at the end.

    Fix: a stray ``pdb.set_trace()`` on the malformed-document path
    would hang the (possibly daemonized) process; malformed documents
    are now reported via add_error and skipped.

    :param enh: list, ids of units to update (enhedsnummer)
    """
    data_list = self.get_entity(enh)
    dicts = {x: list() for x in self.source_keymap.values()}
    for _data_dict in data_list:
        data_dict = _data_dict.to_dict()
        keys = data_dict['_source'].keys()
        # the document's unit type is whichever known type key it carries
        dict_type_set = keys & CvrConnection.source_keymap.values()
        if len(dict_type_set) != 1:
            # malformed document: zero or several type keys - report and skip
            add_error('BAD DICT DOWNLOADED {0}'.format(data_dict))
            continue
        key = dict_type_set.pop()
        dicts[key].append(data_dict['_source'][key])
        if len(dicts[key]) >= self.update_batch_size:
            self.update(dicts[key], key)
            dicts[key].clear()
    # flush the remaining partial batches
    for enh_type, _dicts in dicts.items():
        if len(_dicts) > 0:
            self.update(_dicts, enh_type)
def update(self, dicts, _type):
    """Replace the stored data for the given entities.

    Deletes all existing rows for the units, then re-inserts from the
    freshly downloaded dicts.

    :param dicts: list of dictionaries with data
    :param _type: string, type object to update
    """
    unit_ids = [entry['enhedsNummer'] for entry in dicts]
    CvrConnection.delete(unit_ids, _type)
    try:
        self.insert(dicts, _type)
    except Exception as exc:
        print(exc)
        print('enh {0} failed - enh_type: {1}'.format(unit_ids, _type))
        raise exc
def update_employment_only(self, dicts, _type):
    """Refresh employment data only.

    Works around a CVR bug: the version id is not bumped when
    employment data changes, so employment is always re-inserted.

    :param dicts: list of dictionaries with data
    :param _type: string, type object to update
    """
    unit_ids = [entry['enhedsNummer'] for entry in dicts]
    CvrConnection.delete_employment_only(unit_ids)
    try:
        self.insert_employment_only(dicts, _type)
    except Exception as exc:
        print(exc)
        print('enh failed', unit_ids)
        raise exc
@staticmethod
def delete(enh, _type):
    """Delete all stored data for the given entities.

    Deletion spans many independent tables, so one thread is used per
    table (plus one for the relation table when deleting companies).

    :param enh: list of unit ids (enhedsnummer)
    :param _type: cvr object type: 'Vrvirksomhed', 'VrproduktionsEnhed'
        or 'Vrdeltagerperson'
    :raises ValueError: if _type is not a known cvr object type
    """
    delete_table_models = [alchemy_tables.Update,
                           alchemy_tables.Adresseupdate,
                           alchemy_tables.Attributter,
                           alchemy_tables.Livsforloeb,
                           alchemy_tables.Aarsbeskaeftigelse,
                           alchemy_tables.Kvartalsbeskaeftigelse,
                           alchemy_tables.Maanedsbeskaeftigelse,
                           alchemy_tables.erstAarsbeskaeftigelse,
                           alchemy_tables.erstKvartalsbeskaeftigelse,
                           alchemy_tables.erstMaanedsbeskaeftigelse,
                           alchemy_tables.SpaltningFusion]
    if _type == 'Vrvirksomhed':
        static_table = alchemy_tables.Virksomhed
    elif _type == 'VrproduktionsEnhed':
        static_table = alchemy_tables.Produktion
    elif _type == 'Vrdeltagerperson':
        static_table = alchemy_tables.Person
    else:
        print('bad _type: ', _type)
        raise ValueError('bad _type')
    delete_table_models.append(static_table)

    # delete independently from several tables. Lets thread them
    # maybe threadpool is faster...
    def worker(work_idx):
        session = create_session()
        table_class = delete_table_models[work_idx]
        session.query(table_class).filter(table_class.enhedsnummer.in_(enh)).delete(synchronize_session=False)
        session.commit()
        session.close()

    def enh_worker():
        # company relations key on enhedsnummer_virksomhed, not enhedsnummer
        session = create_session()
        session.query(alchemy_tables.Enhedsrelation). \
            filter(alchemy_tables.Enhedsrelation.enhedsnummer_virksomhed.in_(enh)). \
            delete(synchronize_session=False)
        session.commit()
        session.close()

    threads = []
    if _type == 'Vrvirksomhed':
        t = threading.Thread(target=enh_worker)
        threads.append(t)
        t.start()
    for i in range(len(delete_table_models)):
        t = threading.Thread(target=worker, args=(i,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
@staticmethod
def delete_employment_only(enh):
    """Delete rows for the given units from the employment tables only."""
    employment_tables = [alchemy_tables.Aarsbeskaeftigelse,
                         alchemy_tables.Kvartalsbeskaeftigelse,
                         alchemy_tables.Maanedsbeskaeftigelse,
                         alchemy_tables.erstAarsbeskaeftigelse,
                         alchemy_tables.erstKvartalsbeskaeftigelse,
                         alchemy_tables.erstMaanedsbeskaeftigelse]

    def drop_rows(table_class):
        # one short-lived session per table so deletes run concurrently
        session = create_session()
        session.query(table_class).filter(
            table_class.enhedsnummer.in_(enh)).delete(synchronize_session=False)
        session.commit()
        session.close()

    workers = [threading.Thread(target=drop_rows, args=(table,))
               for table in employment_tables]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def insert(self, dicts, enh_type):
    """Parse and insert the given cvr data.

    :param dicts: list of dicts with cvr data (Danish Business Authority)
    :param enh_type: cvr object type
    """
    parser = data_scanner.DataParser(_type=enh_type)
    addr_parser = self.address_parser_factory.create_parser(self.update_address)
    # order matters: values/dynamic data first, then addresses, then static
    parser.parse_data(dicts)
    parser.parse_dynamic_data(dicts)
    addr_parser.parse_address_data(dicts)
    parser.parse_static_data(dicts)
def insert_employment_only(self, dicts, enh_type):
""" Inserts only employment data - needed to to missing version id when employment data updated in CVR"""
data_parser = data_scanner.DataParser(_type=enh_type)
data_parser.parse_employment(dicts)
@staticmethod
def get_samtid_dict(table):
session = create_session()
query = session.query(table.enhedsnummer,
table.samtid,
table.sidstopdateret)
existing_data = [(x[0], x[1], x[2]) for x in query.all()]
tmp = {a: CvrConnection.update_info(samtid=b, sidstopdateret=c) for (a, b, c) in existing_data}
session.close()
return tmp
@staticmethod
def make_samtid_dict():
""" Make mapping from entity id to current version
Add threading to run in parallel to see if that increase speed. Use threadpool instad of concurrent_future
"""
logger = logging.getLogger('cvrparser')
logger.info('Make id -> samtId map: units update status map')
table_models = [alchemy_tables.Virksomhed, alchemy_tables.Produktion, alchemy_tables.Person]
enh_samtid_map = {}
def worker(table_idx):
table = table_models[table_idx]
tmp = CvrConnection.get_samtid_dict(table)
enh_samtid_map.update(tmp)
threads = []
for i in range(len(table_models)):
t = threading.Thread(target=worker, args=(i, ))
threads.append(t)
t.start()
for t in threads:
t.join()
logger.info('Id map done')
return enh_samtid_map
def update_from_mixed_file(self, filename, force=False):
""" splits data in file by type and updates the database
:param filename: str, filename full path
:param force: bool, force to update all
:return:
"""
print('Start Reading From File', filename)
if force:
enh_samtid_map = {}
else:
enh_samtid_map = self.make_samtid_dict()
dummy = CvrConnection.update_info(samtid=-1, sidstopdateret=self.dummy_date)
dicts = {x: list() for x in self.source_keymap.values()}
with open(filename) as f:
for line in tqdm.tqdm(f):
raw_dat = json.loads(line)
keys = raw_dat.keys()
dict_type_set = keys & self.source_keymap.values() # intersects the two key sets
if len(dict_type_set) != 1:
add_error('BAD DICT DOWNLOADED {0}'.format(str(raw_dat)))
continue
dict_type = dict_type_set.pop()
dat = raw_dat[dict_type]
enhedsnummer = dat['enhedsNummer']
samtid = dat['samtId']
if dat['samtId'] is None:
add_error('Samtid none. '.format(enhedsnummer))
dat['samtId'] = -1
samtid = -1
current_update = enh_samtid_map[enhedsnummer] if enhedsnummer in enh_samtid_map else dummy
if samtid > current_update.samtid:
# update if new version - currently or sidstopdateret > current_update.sidstopdateret:
dicts[dict_type].append(dat)
if len(dicts[dict_type]) >= self.update_batch_size:
self.update(dicts[dict_type], dict_type)
dicts[dict_type].clear()
for enh_type, _dicts in dicts.items():
if len(_dicts) > 0:
self.update(_dicts, enh_type)
print('file read all updated')
    def get_update_list_single_process(self):
        """ Find units that needs updating and their sidstopdateret (last updated)

        The sidstopdateret may be inaccurate and thus way too far back in time,
        therefore we cannot take the largest sidstopdateret from the database.
        Seems we download like 600 dicts a second with match_all.
        Should take around 2 hours and 30 minutes then. This takes 30 so i need
        to save half an hour on downloads.

        :return: dict mapping source type -> {'units': [(enhedsnummer,
                 sidstopdateret)], 'sidstopdateret': min sidstopdateret seen}
        """
        enh_samtid_map = self.make_samtid_dict()
        # "tomorrow" sentinel: any real sidstopdateret will be older than this
        oldest_sidstopdateret = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)+datetime.timedelta(days=1)
        update_dicts = {x: {'units': [], 'sidstopdateret': oldest_sidstopdateret} for x in self.source_keymap.values()}
        if len(enh_samtid_map) == 0:
            # empty database: nothing to compare against, nothing to update
            return update_dicts
        dummy = CvrConnection.update_info(samtid=-1, sidstopdateret=self.dummy_date)
        print('Get update time for all data')
        for _type in self.source_keymap.values():
            search = Search(using=self.elastic_client, index=self.index)
            search = search.query('match_all')
            sidst_key = '{0}.sidstOpdateret'.format(_type)
            samt_key = '{0}.samtId'.format(_type)
            field_list = ['_id', sidst_key, samt_key]
            # field_list = ['_id'] + ['{0}.sidstOpdateret'.format(key) for key in self.source_keymap.values()] + \
            #              ['{0}.samtId'.format(key) for key in self.source_keymap.values()]
            search = search.fields(fields=field_list)
            params = {'scroll': self.elastic_search_scroll_time, 'size': 2**12}
            search = search.params(**params)
            print('ElasticSearch Query: ', search.to_dict())
            generator = search.scan()
            for cvr_update in tqdm.tqdm(generator):
                enhedsnummer = int(cvr_update.meta.id)
                raw_dat = cvr_update.to_dict()
                # requested fields come back as single-element lists
                samtid = raw_dat[samt_key][0] if samt_key in raw_dat else None
                sidstopdateret = raw_dat[sidst_key][0] if sidst_key in raw_dat else None
                if sidstopdateret is None or samtid is None:
                    continue
                current_update = enh_samtid_map[enhedsnummer] if enhedsnummer in enh_samtid_map else dummy
                if samtid > current_update.samtid:
                    # unit is newer in cvr than in our database -> schedule it
                    utc_sidstopdateret = utc_transform(sidstopdateret)
                    update_dicts[_type]['sidstopdateret'] = min(utc_sidstopdateret,
                                                                update_dicts[_type]['sidstopdateret'])
                    update_dicts[_type]['units'].append((enhedsnummer, utc_sidstopdateret))
                    # break
        print('Update Info: ')
        print([(k, v['sidstopdateret'], len(v['units'])) for k, v in update_dicts.items()])
        return update_dicts
def get_update_list_type(self, _type):
return update_time_worker((_type, self.url, self.user, self.password, self.index))
def get_update_list(self):
""" Threaded version - may not be so IO wait bound since we stream
so maybe change to process pool instead """
pool = Pool(processes=3)
result = pool.map(update_time_worker, [(x, self.url, self.user, self.password, self.index)
for x in self.source_keymap.values()], chunksize=1)
update_dicts = {x: y for (x, y) in result}
print([(k, v['sidstopdateret'], len(v['units'])) for k, v in update_dicts.items()])
return update_dicts
def optimize_download_updated(self, update_info):
""" Due to a missing sidstopdateret for employment updates in cvr
the sidstopdateret may be inaccurate and thus way to far back in time
Update the self.max_download_size oldest to see if that helps us use a reasonable sidstopdateret data
maybe it is actually the punits that gives the biggest issue here.
:param update_info:
:return:
"""
for _type, info in update_info.items():
units = info['units']
if len(units) < self.max_download_size:
enh = [x[0] for x in units]
self.update_units(enh)
info['units'] = []
info['sidstopdateret'] = None
else:
sorted(units, key=lambda x: x[1])
first_enh = [x[0] for x in units[0:self.max_download_size]]
new_sidst_opdateret = units[self.max_download_size][1]
self.update_units(first_enh)
info['units'] = units[self.max_download_size:]
info['sidstopdateret'] = new_sidst_opdateret
return update_info
def find_missing(self):
"""
Check if we are missing anything
:return:
"""
search = Search(using=self.elastic_client, index=self.index)
search = search.query('match_all')
field_list = ['_id']
search = search.fields(fields=field_list)
params = {'scroll': self.elastic_search_scroll_time, 'size': 2*self.elastic_search_scan_size}
search = search.params(**params)
print('ElasticSearch Query: ', search.to_dict())
generator = search.scan()
ids = [x.meta.id for x in generator]
return ids
def update_time_worker(args):
    """Scan the elastic index for one source type and collect the units that
    need updating together with their sidstopdateret timestamps.

    :param args: tuple (_type, url, user, password, index)
    :return: (_type, {'units': [(enhedsnummer, utc sidstopdateret)],
              'sidstopdateret': oldest sidstopdateret seen})
    """
    _type, url, user, password, index = args
    enh_samtid_map = CvrConnection.make_samtid_dict()
    dummy_date = datetime.datetime(year=1001, month=1, day=1, tzinfo=pytz.utc)
    dummy = CvrConnection.update_info(samtid=-1, sidstopdateret=dummy_date)
    # "tomorrow" sentinel: any real sidstopdateret will be older than this
    oldest_sidstopdateret = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + datetime.timedelta(days=1)
    type_dict = {'units': [], 'sidstopdateret': oldest_sidstopdateret}
    if len(enh_samtid_map) == 0:
        # BUGFIX: previously returned the bare dict here while every other
        # path returns a (_type, dict) pair; callers unpack two elements
        # (see get_update_list), which would mis-unpack the dict's keys.
        return _type, type_dict
    elastic_client = create_elastic_connection(url, (user, password))
    search = Search(using=elastic_client, index=index).query('match_all')
    sidst_key = '{0}.sidstOpdateret'.format(_type)
    samt_key = '{0}.samtId'.format(_type)
    field_list = ['_id', sidst_key, samt_key]
    search = search.fields(fields=field_list)
    params = {'scroll': '10m', 'size': 2 ** 12}
    search = search.params(**params)
    print('ElasticSearch Query: ', search.to_dict())
    generator = search.scan()
    for cvr_update in generator:
        enhedsnummer = int(cvr_update.meta.id)
        raw_dat = cvr_update.to_dict()
        # requested fields come back as single-element lists
        samtid = raw_dat[samt_key][0] if samt_key in raw_dat else None
        sidstopdateret = raw_dat[sidst_key][0] if sidst_key in raw_dat else None
        if sidstopdateret is None or samtid is None:
            continue
        current_update = enh_samtid_map.get(enhedsnummer, dummy)
        if samtid > current_update.samtid:
            utc_sidstopdateret = utc_transform(sidstopdateret)
            type_dict['sidstopdateret'] = min(utc_sidstopdateret, type_dict['sidstopdateret'])
            type_dict['units'].append((enhedsnummer, utc_sidstopdateret))
    return _type, type_dict
def retry_generator(g):
    """Yield items from iterator g, retrying after exceptions.

    StopIteration ends the stream normally; any other exception is printed
    and counted, and after more than three failures it is re-raised.
    (Note: a true generator is closed once it raises, so retries only help
    for iterators that can recover.)
    """
    attempts = 0
    while True:
        try:
            item = next(g)
        except StopIteration:
            return
        except Exception as exc:
            print('retry generator', attempts)
            attempts += 1
            print(exc)
            if attempts > 3:
                raise
        else:
            yield item
def cvr_update_producer(queue, lock):
    """ Producer function that places data to be inserted on the Queue

    Scans the whole elastic index and puts (dict_type, data, full_update)
    tuples on the queue for the consumers; full_update is False when only
    the employment data needs refreshing.

    :param queue: multiprocessing.Queue
    :param lock: multiprocessing.Lock
    """
    t0 = time.time()
    logger = logging.getLogger('producer')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('producer.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)
    with lock:
        logger.info('Starting producer => {}'.format(os.getpid()))
    # the database engine from the parent process is unusable after
    # spawn/fork: dispose it or set up a fresh connection
    if not engine.is_none():
        engine.dispose()
    else:
        setup_database_connection()
    with lock:
        logger.info('setup databaseconnection - lost in spawn/fork')
    try:
        cvr = CvrConnection()
        enh_samtid_map = CvrConnection.make_samtid_dict()
        dummy = CvrConnection.update_info(samtid=-1, sidstopdateret=CvrConnection.dummy_date)
        params = {'scroll': cvr.elastic_search_scroll_time, 'size': cvr.elastic_search_scan_size}
        search = Search(using=cvr.elastic_client, index=cvr.index).query('match_all').params(**params)
        # search = Search(using=cvr.elastic_client, index=cvr.index).query(elasticsearch_dsl.query.MatchAll()).params(**params)
        generator = search.scan()
        full_update = False
        i = 0
        for obj in tqdm.tqdm(generator):
            try:
                i = i+1
                dat = obj.to_dict()
                keys = dat.keys()
                dict_type_set = keys & CvrConnection.source_keymap.values()  # intersects the two key sets
                if len(dict_type_set) != 1:
                    print(dict_type_set)
                    logger.debug('BAD DICT DOWNLOADED CVR UPDATE PRODUCER \n{0} {1}'.format(dat, dict_type_set))
                    continue
                dict_type = dict_type_set.pop()
                dat = dat[dict_type]
                enhedsnummer = dat['enhedsNummer']
                samtid = dat['samtId']
                if dat['samtId'] is None:
                    add_error('Samtid none: enh {0}'.format(enhedsnummer))
                    dat['samtId'] = -1
                    samtid = -1
                current_update = enh_samtid_map[enhedsnummer] if enhedsnummer in enh_samtid_map else dummy
                if samtid > current_update.samtid:
                    # newer version in cvr -> consumer must do a full update
                    full_update = True
                else:
                    # same version: employment-only refresh; persons have no
                    # employment data, so skip them entirely
                    if dict_type == 'Vrdeltagerperson':
                        continue
                    full_update = False
                # retry the put so a temporarily full queue does not lose data
                for repeat in range(1000):
                    try:
                        queue.put((dict_type, dat, full_update), timeout=120)
                        break
                    except Exception as e:
                        logger.debug('Producer timeout failed {0} - retrying {1} - {2} - repeat: {3}'.format(str(e), enhedsnummer, dict_type, repeat))
                if (i % 10000 == 0):
                    logger.debug('{0} rounds'.format(i))
            except Exception as e:
                # per-object failures are logged and skipped; the scan goes on
                logger.debug('Producer exception: e: {0} - obj: {1}'.format(e, obj))
                print('continue producer')
                # print(obj)
        # if ((i+1) % 10000) == 0:
        #     with lock:
        #         print('{0} objects parsed and inserted into queue'.format(i))
    except Exception as e:
        # a failure of the scan itself (elastic scroll) aborts the producer
        print('*** generator error ***', file=sys.stderr)
        logger.debug('generator error: {0}'.format(str(e)))
        print(e)
        print(type(e))
        # logger.info(e)
        # logger.info(type(e))
        return
    # Synchronize access to the console
    with lock:
        logger.info('objects parsing done')
    t1 = time.time()
    with lock:
        logger.info('Producer Done. Exiting...{0}'.format(os.getpid()))
        logger.info('Producer Time Used: {0}'.format(t1-t0))
    # queue.put(cvr.cvr_sentinel)
    # queue.put(cvr.cvr_sentinel)
def test_producer():
    """Smoke-test cvr_update_producer with in-process queue/lock stand-ins."""
    print('test producer')

    class CountingQueue():
        """Queue stand-in that only counts puts per dict type."""
        def __init__(self):
            self.counter = {}

        def put(self, obj, timeout=None):
            dict_type = obj[0]
            self.counter[dict_type] = self.counter.get(dict_type, 0) + 1

    class NoOpLock():
        """Context-manager stand-in for a multiprocessing lock."""
        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

    cvr_update_producer(CountingQueue(), NoOpLock())
def cvr_update_consumer(queue, lock):
    """Consumer: pull (dict_type, data, full_update) tuples off the queue,
    batch them per source type and insert them into the database. Exits when
    the sentinel object is received.

    :param queue: multiprocessing.Queue
    :param lock: multiprocessing.Lock
    :return:
    """
    t0 = time.time()
    logger = logging.getLogger('consumer-{0}'.format(os.getpid()))
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('consumer_{0}.log'.format(os.getpid()))
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)
    logger.info('Starting consumer => {}'.format(os.getpid()))
    # the database engine from the parent process is unusable after
    # spawn/fork: dispose it or set up a fresh connection
    if not engine.is_none():
        engine.dispose()
    else:
        setup_database_connection()
    with lock:
        logger.info('setup database connection - lost in spawn/fork')
    cvr = CvrConnection()
    # per-type pending batches: full updates vs employment-only updates
    dicts = {x: list() for x in CvrConnection.source_keymap.values()}
    emp_dicts = {x: list() for x in CvrConnection.source_keymap.values()}
    # BUGFIX: initialize so the except handler below cannot hit an unbound
    # local if a failure occurs before the first tuple is unpacked.
    full_update = True
    i = 0
    while True:
        i = i+1
        # block until an item arrives, retrying on queue.get timeouts
        while True:
            try:
                obj = queue.get(timeout=10)
                break
            except Exception as e:
                logger.debug('Consumer timeout reached - retrying - e: {0}'.format(e))
        try:
            if obj == cvr.cvr_sentinel:
                logger.info('sentinel found - Thats it im out of here')
                break
            # BUGFIX: the assert message previously said "length 2" while
            # checking for length 3.
            assert len(obj) == 3, 'obj should be a tuple of length 3'
            dict_type = obj[0]
            dat = obj[1]
            full_update = obj[2]
            dicts_to_use = dicts if full_update else emp_dicts
            dicts_to_use[dict_type].append(dat)
            if len(dicts_to_use[dict_type]) >= cvr.update_batch_size:
                # BUGFIX: the batch timing here previously reassigned t0,
                # corrupting the total elapsed time reported on exit; the
                # dead per-batch timing has been removed.
                if full_update:
                    cvr.update(dicts_to_use[dict_type], dict_type)
                else:
                    cvr.update_employment_only(dicts_to_use[dict_type], dict_type)
                dicts_to_use[dict_type].clear()
        except Exception as e:
            # batch insert failed: fall back to inserting one dict at a time
            # so a single bad record cannot poison the whole batch
            logger.debug('Exception in consumer: {0} - {1}'.format(os.getpid(), str(e)))
            logger.debug('insert one by one')
            print('Exception in consumer: {0} - {1}'.format(os.getpid(), str(e)))
            for enh_type, _dicts in dicts.items():
                for one_dict in _dicts:
                    logger.debug('inserting {0}'.format(one_dict['enhedsNummer']))
                    try:
                        if full_update:
                            cvr.update([one_dict], enh_type)
                        else:
                            logger.debug('error in emp only')
                            cvr.update_employment_only([one_dict], enh_type)
                    except Exception as e:
                        logger.debug('one insert error\n{0}'.format(str(e)))
                        logger.debug('enh failed: {0}'.format(one_dict['enhedsNummer']))
        if i % 10000 == 0:
            logger.debug('Consumer {0} rounds completed and alive - '.format(i))
    # flush the remaining partial batches
    for enh_type, _dicts in dicts.items():
        if len(_dicts) > 0:
            cvr.update(_dicts, enh_type)
    for enh_type, _dicts in emp_dicts.items():
        if len(_dicts) > 0:
            cvr.update_employment_only(_dicts, enh_type)
    t1 = time.time()
    with lock:
        print('Consumer Done. Exiting...{0} - time used {1}'.format(os.getpid(), t1-t0))
|
bigipconfigdriver.py | #!/usr/bin/env python
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import fcntl
import hashlib
import json
import logging
import os
import os.path
import signal
import sys
import threading
import time
import traceback
import pyinotify
from urlparse import urlparse
from f5_cccl.api import F5CloudServiceManager
from f5_cccl.exceptions import F5CcclError
from f5_cccl.utils.mgmt import mgmt_root
from f5_cccl.utils.profile import (delete_unused_ssl_profiles,
create_client_ssl_profile,
create_server_ssl_profile)
# Module logger plus a console handler attached to the root logger, so every
# logger in the process shares one timestamped output format.
log = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setFormatter(
    logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
root_logger = logging.getLogger()
root_logger.addHandler(console)
class ResponseStatusFilter(logging.Filter):
    """Drop log records whose message begins with "RESPONSE::STATUS"."""

    def filter(self, record):
        message = record.getMessage()
        return not message.startswith("RESPONSE::STATUS")
class CertFilter(logging.Filter):
    """Drop any log record whose message contains "CERTIFICATE"."""

    def filter(self, record):
        message = record.getMessage()
        return "CERTIFICATE" not in message
class KeyFilter(logging.Filter):
    """Drop any log record whose message contains "PRIVATE KEY"."""

    def filter(self, record):
        message = record.getMessage()
        return "PRIVATE KEY" not in message
# Scrub noisy RESPONSE::STATUS lines and certificate/private-key material
# from everything logged through the root logger.
root_logger.addFilter(ResponseStatusFilter())
root_logger.addFilter(CertFilter())
root_logger.addFilter(KeyFilter())
# Defaults used when the config file's "global" section omits these values.
DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_VERIFY_INTERVAL = 30.0
# Name of the schema file that marks a manager as handling 'net' config.
NET_SCHEMA_NAME = 'cccl-net-api-schema.yml'
class CloudServiceManager():
    """CloudServiceManager class.

    Applies a configuration to a BigIP

    Args:
        bigip: ManagementRoot object
        partition: BIG-IP partition to manage
        user_agent: optional user-agent string forwarded to CCCL
        prefix: optional object-name prefix forwarded to CCCL
        schema_path: optional schema file; a path containing 'net' selects
            the net (vxlan) schema, anything else means ltm
    """

    def __init__(self, bigip, partition, user_agent=None, prefix=None,
                 schema_path=None):
        """Initialize the CloudServiceManager object."""
        self._mgmt_root = bigip
        self._schema = schema_path
        self._cccl = F5CloudServiceManager(
            bigip,
            partition,
            user_agent=user_agent,
            prefix=prefix,
            schema_path=schema_path)

    def mgmt_root(self):
        """ Return the BIG-IP ManagementRoot object"""
        return self._mgmt_root

    def get_partition(self):
        """ Return the managed partition."""
        return self._cccl.get_partition()

    def get_schema_type(self):
        """Return 'ltm' or 'net', based on schema type."""
        if self._schema is None:
            return 'ltm'
        elif 'net' in self._schema:
            return 'net'
        # BUGFIX: previously fell through and returned None for a non-net
        # schema path, which made callers' == 'ltm' checks silently fail
        # (e.g. custom profile creation was skipped).
        return 'ltm'

    def _apply_ltm_config(self, config):
        """Apply the ltm configuration to the BIG-IP.

        Args:
            config: BIG-IP config dict
        """
        return self._cccl.apply_ltm_config(config)

    def _apply_net_config(self, config):
        """Apply the net configuration to the BIG-IP."""
        return self._cccl.apply_net_config(config)

    def get_proxy(self):
        """Called from 'CCCL' delete_unused_ssl_profiles"""
        return self._cccl.get_proxy()
class IntervalTimerError(Exception):
    """Raised when IntervalTimer is constructed with invalid arguments."""

    def __init__(self, msg):
        super(IntervalTimerError, self).__init__(msg)
class IntervalTimer(object):
    """Repeating timer that calls `cb` every `interval` seconds, subtracting
    the callback's own execution time from the next delay so invocations stay
    roughly on cadence.
    """

    def __init__(self, interval, cb):
        """Create the timer (does not start it).

        Args:
            interval: seconds between callback invocations; must be > 0
            cb: zero-argument callable invoked on each tick
        Raises:
            IntervalTimerError: if interval <= 0 or cb is not callable
        """
        float(interval)
        if 0 >= interval:
            raise IntervalTimerError("interval must be greater than 0")
        if not cb or not callable(cb):
            raise IntervalTimerError("cb must be callable object")
        self._cb = cb
        self._interval = interval
        self._execution_time = 0.0
        self._running = False
        self._timer = None
        self._lock = threading.RLock()

    def _set_execution_time(self, start_time, stop_time):
        # Record how long the callback ran; clamp clock skew to zero.
        if stop_time >= start_time:
            self._execution_time = stop_time - start_time
        else:
            self._execution_time = 0.0

    def _adjust_interval(self):
        # Shorten the next delay by the last callback's run time (never
        # below zero) and consume the recorded execution time.
        adjusted_interval = self._interval - self._execution_time
        if adjusted_interval < 0.0:
            adjusted_interval = 0.0
        self._execution_time = 0.0
        return adjusted_interval

    def _run(self):
        # BUGFIX: use wall-clock time.time() instead of time.clock().
        # time.clock() measured CPU time on POSIX, so it undercounted
        # I/O-bound callbacks (this one talks to a BIG-IP over the network)
        # and it was removed in Python 3.8.
        start_time = time.time()
        try:
            self._cb()
        except Exception:
            log.exception('Unexpected error')
        finally:
            with self._lock:
                stop_time = time.time()
                self._set_execution_time(start_time, stop_time)
                if self._running:
                    # reschedule ourselves for the next tick
                    self.start()

    def is_running(self):
        """Return True while the timer is scheduled."""
        return self._running

    def start(self):
        """Schedule the next callback invocation."""
        with self._lock:
            if self._running:
                # restart timer, possibly with a new interval
                self.stop()
            self._timer = threading.Timer(self._adjust_interval(), self._run)
            # timers can't be stopped, cancel just prevents the callback from
            # occuring when the timer finally expires. Make it a daemon allows
            # cancelled timers to exit eventually without a need for join.
            self._timer.daemon = True
            self._timer.start()
            self._running = True

    def stop(self):
        """Cancel the pending callback invocation, if any."""
        with self._lock:
            if self._running:
                self._timer.cancel()
                self._timer = None
                self._running = False
class ConfigError(Exception):
    """Raised for invalid or missing controller configuration."""

    def __init__(self, msg):
        super(ConfigError, self).__init__(msg)
def create_ltm_config(partition, config):
    """Extract a BIG-IP configuration from the LTM configuration.

    Args:
        partition: name of the partition whose resources are wanted
        config: BigIP config
    """
    resources = config.get('resources', {})
    if partition in resources:
        return resources[partition]
    return {}
def create_network_config(config):
    """Extract a BIG-IP Network configuration from the network config.

    Args:
        config: BigIP config which contains vxlan defs
    """
    net_cfg = {}
    if 'vxlan-fdb' in config:
        net_cfg['userFdbTunnels'] = [config['vxlan-fdb']]
    if 'vxlan-arp' in config:
        arp_section = config['vxlan-arp']
        if 'arps' in arp_section and arp_section['arps'] is not None:
            net_cfg['arps'] = arp_section['arps']
    log.debug("NET Config: %s", json.dumps(net_cfg))
    return net_cfg
def _create_custom_profiles(mgmt, partition, custom_profiles):
    """Create client/server SSL profiles; return the number left incomplete.

    Server profiles may reference a CA cert in another server profile, so the
    self-referencing server profiles are created in a first pass.
    """
    incomplete = 0
    for profile in custom_profiles:
        is_self_ca = profile.get('caFile', '') == "self"
        if profile['context'] == 'serverside' and is_self_ca:
            incomplete += create_server_ssl_profile(mgmt, partition, profile)
    for profile in custom_profiles:
        context = profile['context']
        if context == 'clientside':
            incomplete += create_client_ssl_profile(mgmt, partition, profile)
        elif context == 'serverside':
            if profile.get('caFile', '') != "self":
                incomplete += create_server_ssl_profile(
                    mgmt, partition, profile)
        else:
            log.error(
                "Only client or server custom profiles are supported.")
    return incomplete
def _delete_unused_ssl_profiles(mgr, partition, config):
    """Delegate to f5_cccl's delete_unused_ssl_profiles for this partition."""
    return delete_unused_ssl_profiles(mgr, partition, config)
class ConfigHandler():
    """Applies the configuration file to every managed BIG-IP partition on a
    dedicated worker thread.

    Resets are requested by ConfigWatcher (file change), by the periodic
    verify IntervalTimer, or by the backoff retry timer after a failed apply.
    """

    def __init__(self, config_file, managers, verify_interval):
        # Path of the JSON config file to (re)apply.
        self._config_file = config_file
        # CloudServiceManager instances, one per partition/schema.
        self._managers = managers
        # Condition guards _pending_reset/_stop and wakes the worker thread.
        self._condition = threading.Condition()
        self._thread = threading.Thread(target=self._do_reset)
        self._pending_reset = False
        self._stop = False
        # Exponential backoff state used to retry failed config applies.
        self._backoff_time = 1
        self._backoff_timer = None
        self._max_backoff_time = 128
        # Periodic timer that re-verifies the config every verify_interval.
        self._verify_interval = verify_interval
        self._interval = IntervalTimer(self._verify_interval,
                                       self.notify_reset)
        self._thread.start()

    def stop(self):
        """Ask the worker thread to exit and cancel any backoff timer."""
        self._condition.acquire()
        self._stop = True
        self._condition.notify()
        self._condition.release()
        if self._backoff_timer is not None:
            self.cleanup_backoff()

    def notify_reset(self):
        """Request a (re)apply of the config; callable from any thread."""
        self._condition.acquire()
        self._pending_reset = True
        self._condition.notify()
        self._condition.release()

    def _do_reset(self):
        """Worker loop: wait for a reset request, then parse and apply."""
        log.debug('config handler thread start')
        # NOTE(review): the outer `with` holds the condition (an RLock
        # underneath) for the whole loop body, so notify_reset() blocks
        # while an apply is in progress — confirm this is intentional.
        with self._condition:
            while True:
                self._condition.acquire()
                if not self._pending_reset and not self._stop:
                    self._condition.wait()
                    log.debug('config handler woken for reset')
                self._pending_reset = False
                self._condition.release()
                if self._stop:
                    log.info('stopping config handler')
                    if self._backoff_timer is not None:
                        self.cleanup_backoff()
                    break
                start_time = time.time()
                incomplete = 0
                try:
                    config = _parse_config(self._config_file)
                    # No 'resources' indicates that the controller is not
                    # yet ready -- it does not mean to apply an empty config
                    if 'resources' not in config:
                        continue
                    incomplete = self._update_cccl(config)
                except ValueError:
                    formatted_lines = traceback.format_exc().splitlines()
                    last_line = formatted_lines[-1]
                    log.error('Failed to process the config file {} ({})'
                              .format(self._config_file, last_line))
                    incomplete = 1
                except Exception:
                    log.exception('Unexpected error')
                    incomplete = 1
                if incomplete:
                    # Error occurred, perform retries
                    self.handle_backoff()
                else:
                    # Success: (re)start periodic verification and reset
                    # the exponential backoff state.
                    if (self._interval and self._interval.is_running()
                            is False):
                        self._interval.start()
                    self._backoff_time = 1
                    if self._backoff_timer is not None:
                        self.cleanup_backoff()
                # Optional scale-test instrumentation: log per-service
                # backend counts when SCALE_PERF_ENABLE is set.
                perf_enable = os.environ.get('SCALE_PERF_ENABLE')
                if perf_enable:  # pragma: no cover
                    test_data = {}
                    app_count = 0
                    backend_count = 0
                    for service in config['resources']['test'][
                            'virtualServers']:
                        app_count += 1
                        backends = 0
                        for pool in config['resources']['test']['pools']:
                            if service['name'] in pool['name']:
                                backends = len(pool['members'])
                                break
                        test_data[service['name']] = backends
                        backend_count += backends
                    test_data['Total_Services'] = app_count
                    test_data['Total_Backends'] = backend_count
                    test_data['Time'] = time.time()
                    json_data = json.dumps(test_data)
                    log.info('SCALE_PERF: Test data: %s',
                             json_data)
                log.debug('updating tasks finished, took %s seconds',
                          time.time() - start_time)
        if self._interval:
            self._interval.stop()

    def _update_cccl(self, config):
        """Apply ltm/net config through each manager; return failure count."""
        _handle_vxlan_config(config)
        cfg_net = create_network_config(config)
        incomplete = 0
        for mgr in self._managers:
            partition = mgr.get_partition()
            cfg_ltm = create_ltm_config(partition, config)
            try:
                # Manually create custom profiles;
                # CCCL doesn't yet do this
                if 'customProfiles' in cfg_ltm and \
                        mgr.get_schema_type() == 'ltm':
                    tmp = 0
                    tmp = _create_custom_profiles(
                        mgr.mgmt_root(),
                        partition,
                        cfg_ltm['customProfiles'])
                    incomplete += tmp
                # Apply the BIG-IP config after creating profiles
                # and before deleting profiles
                if mgr.get_schema_type() == 'net':
                    incomplete += mgr._apply_net_config(cfg_net)
                else:
                    incomplete += mgr._apply_ltm_config(cfg_ltm)
                # Manually delete custom profiles (if needed)
                if mgr.get_schema_type() == 'ltm':
                    _delete_unused_ssl_profiles(
                        mgr,
                        partition,
                        cfg_ltm)
            except F5CcclError as e:
                # We created an invalid configuration, raise the
                # exception and fail
                log.error("CCCL Error: %s", e.msg)
                incomplete += 1
        return incomplete

    def cleanup_backoff(self):
        """Cleans up canceled backoff timers."""
        self._backoff_timer.cancel()
        self._backoff_timer.join()
        self._backoff_timer = None

    def handle_backoff(self):
        """Wrapper for calls to retry_backoff."""
        # Suspend periodic verification while in a failure/backoff cycle.
        if (self._interval and self._interval.is_running() is
                True):
            self._interval.stop()
        if self._backoff_timer is None:
            self.retry_backoff()

    def retry_backoff(self):
        """Add a backoff timer to retry in case of failure."""
        def timer_cb():
            self._backoff_timer = None
            self.notify_reset()

        self._backoff_timer = threading.Timer(
            self._backoff_time, timer_cb
        )
        log.error("Error applying config, will try again in %s seconds",
                  self._backoff_time)
        self._backoff_timer.start()
        # Double the delay for the next failure, up to the cap.
        if self._backoff_time < self._max_backoff_time:
            self._backoff_time *= 2
class ConfigWatcher(pyinotify.ProcessEvent):
    """Watches the config file's directory with inotify and calls on_change
    whenever the file's content hash changes.

    Falls back to a 1-second poll loop whenever the watched directory does
    not exist (yet) or the watchpoint is moved/destroyed.
    """

    def __init__(self, config_file, on_change):
        basename = os.path.basename(config_file)
        if not basename or 0 == len(basename):
            raise ConfigError('config_file must be a file path')
        self._config_file = config_file
        self._on_change = on_change
        # Watch the directory, not the file, so renames/recreations of the
        # file itself are still observed.
        self._config_dir = os.path.dirname(self._config_file)
        # sha256 digest of the file content at the last reported change.
        self._config_stats = None
        if os.path.exists(self._config_file):
            try:
                self._config_stats = self._digest()
            except IOError as ioe:
                log.warning('ioerror during sha sum calculation: {}'.
                            format(ioe))
        self._running = False
        self._polling = False
        self._user_abort = False
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)

    def _exit_gracefully(self, signum, frame):
        # SIGINT/SIGTERM handler: ask loop() to terminate.
        self._user_abort = True
        self._running = False

    def _loop_check(self, notifier):
        # pyinotify loop callback: returning True ends the inotify loop so
        # we can drop back to polling mode.
        if self._polling:
            log.debug('inotify loop ended - returning to polling mode')
            return True
        else:
            return False

    def loop(self):
        """Run until aborted: poll until the directory exists, then watch it
        with inotify, alternating as the directory comes and goes."""
        self._running = True
        if not os.path.exists(self._config_dir):
            log.info(
                'configured directory doesn\'t exist {}, entering poll loop'.
                format(self._config_dir))
            self._polling = True
        while self._running:
            try:
                # Poll once per second until the directory shows up.
                while self._polling:
                    if self._polling:
                        if os.path.exists(self._config_dir):
                            log.debug('found watchable directory - {}'.format(
                                self._config_dir))
                            self._polling = False
                            break
                        else:
                            log.debug('waiting for watchable directory - {}'.
                                      format(self._config_dir))
                            time.sleep(1)
                # Directory exists: set up an inotify watch on it.
                _wm = pyinotify.WatchManager()
                _notifier = pyinotify.Notifier(_wm, default_proc_fun=self)
                _notifier.coalesce_events(True)
                mask = (pyinotify.IN_CREATE | pyinotify.IN_DELETE |
                        pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO |
                        pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVE_SELF |
                        pyinotify.IN_DELETE_SELF)
                _wm.add_watch(
                    path=self._config_dir,
                    mask=mask,
                    quiet=False,
                    exclude_filter=lambda path: False)
                log.info('entering inotify loop to watch {}'.format(
                    self._config_file))
                _notifier.loop(callback=self._loop_check)
                if (not self._polling and _notifier._fd is None):
                    log.info('terminating')
                    self._running = False
            except Exception as e:
                log.warning(e)
        if self._user_abort:
            log.info('Received user kill signal, terminating.')

    def _digest(self):
        """Return the sha256 digest of the config file, read under a shared
        advisory lock so a half-written file is never hashed."""
        sha = hashlib.sha256()
        with open(self._config_file, 'rb') as f:
            fcntl.lockf(f.fileno(), fcntl.LOCK_SH, 0, 0, 0)
            while True:
                buf = f.read(4096)
                if not buf:
                    break
                sha.update(buf)
            fcntl.lockf(f.fileno(), fcntl.LOCK_UN, 0, 0, 0)
        return sha.digest()

    def _should_watch(self, pathname):
        # Only events on the config file itself are interesting.
        if pathname == self._config_file:
            return True
        return False

    def _is_changed(self):
        """Return (changed, current_digest) for the config file."""
        changed = False
        cur_hash = None
        if not os.path.exists(self._config_file):
            # File vanished: report a change if we previously had a digest.
            if cur_hash != self._config_stats:
                changed = True
            else:
                changed = False
        else:
            try:
                cur_hash = self._digest()
                if cur_hash != self._config_stats:
                    changed = True
                else:
                    changed = False
            except IOError as ioe:
                log.warning('ioerror during sha sum calculation: {}'.
                            format(ioe))
        return (changed, cur_hash)

    def process_default(self, event):
        """pyinotify callback invoked for every event on the directory."""
        if (pyinotify.IN_DELETE_SELF == event.mask or
                pyinotify.IN_MOVE_SELF == event.mask):
            # The watched directory itself went away; fall back to polling.
            log.warn(
                'watchpoint {} has been moved or destroyed, using poll loop'.
                format(self._config_dir))
            self._polling = True
            if self._config_stats is not None:
                log.debug('config file {} changed, parent gone'.format(
                    self._config_file))
                self._config_stats = None
                self._on_change()
        if self._should_watch(event.pathname):
            (changed, sha) = self._is_changed()
            if changed:
                log.debug('config file {0} changed - signalling bigip'.format(
                    self._config_file, self._config_stats, sha))
                self._config_stats = sha
                self._on_change()
def _parse_config(config_file):
    """Wait until config_file exists, then read it under a shared advisory
    lock and return the parsed JSON."""
    def _file_exist_cb(log_success):
        # Callback for _retry_backoff: success once the file shows up.
        if not os.path.exists(config_file):
            return (False, 'Waiting for config file {}'.format(config_file))
        if log_success:
            log.info('Config file: {} found'.format(config_file))
        return (True, None)

    _retry_backoff(_file_exist_cb)
    with open(config_file, 'r') as config:
        fcntl.lockf(config.fileno(), fcntl.LOCK_SH, 0, 0, 0)
        data = config.read()
        fcntl.lockf(config.fileno(), fcntl.LOCK_UN, 0, 0, 0)
    config_json = json.loads(data)
    log.debug('loaded configuration file successfully')
    return config_json
def _handle_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-file',
type=str,
required=True,
help='BigIp configuration file')
parser.add_argument(
'--ctlr-prefix',
type=str,
required=True,
help='Controller name prefix'
)
args = parser.parse_args()
basename = os.path.basename(args.config_file)
if not basename or 0 == len(basename):
raise ConfigError('must provide a file path')
args.config_file = os.path.realpath(args.config_file)
return args
def _handle_global_config(config):
    """Extract settings from the optional 'global' section of *config*.

    Returns (verify_interval, level, vxlan_partition).  Invalid values are
    logged and replaced by defaults; the computed log level is applied to
    the root logger.
    """
    level = DEFAULT_LOG_LEVEL
    verify_interval = DEFAULT_VERIFY_INTERVAL
    # BUGFIX: vxlan_partition was only assigned inside the 'global' branch
    # but returned unconditionally, raising NameError when the section was
    # missing.  Default it to None up front.
    vxlan_partition = None

    if config and 'global' in config:
        global_cfg = config['global']
        if 'log-level' in global_cfg:
            log_level = global_cfg['log-level']
            try:
                level = logging.getLevelName(log_level.upper())
            except AttributeError:
                # non-string value has no .upper()
                log.warn('The "global:log-level" field in the configuration '
                         'file should be a string')
        if 'verify-interval' in global_cfg:
            try:
                verify_interval = float(global_cfg['verify-interval'])
                if verify_interval < 0:
                    verify_interval = DEFAULT_VERIFY_INTERVAL
                    log.warn('The "global:verify-interval" field in the '
                             'configuration file should be a non-negative '
                             'number')
            except ValueError:
                log.warn('The "global:verify-interval" field in the '
                         'configuration file should be a number')
        vxlan_partition = global_cfg.get('vxlan-partition')

    try:
        root_logger.setLevel(level)
        if level > logging.DEBUG:
            logging.getLogger('requests.packages.urllib3.'
                              'connectionpool').setLevel(logging.WARNING)
    except (TypeError, ValueError):
        # getLevelName() returns a string for unknown level names, which
        # setLevel/comparison rejects -- fall back to the default level.
        level = DEFAULT_LOG_LEVEL
        root_logger.setLevel(level)
        if level > logging.DEBUG:
            logging.getLogger('requests.packages.urllib3.'
                              'connectionpool').setLevel(logging.WARNING)
        log.warn('Undefined value specified for the '
                 '"global:log-level" field in the configuration file')

    # level only is needed for unit tests
    return verify_interval, level, vxlan_partition
def _handle_bigip_config(config):
if (not config) or ('bigip' not in config):
raise ConfigError('Configuration file missing "bigip" section')
bigip = config['bigip']
if 'username' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:username" section')
if 'password' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:password" section')
if 'url' not in bigip:
raise ConfigError('Configuration file missing "bigip:url" section')
if ('partitions' not in bigip) or (len(bigip['partitions']) == 0):
raise ConfigError('Configuration file must specify at least one '
'partition in the "bigip:partitions" section')
url = urlparse(bigip['url'])
host = url.hostname
port = url.port
if not port:
port = 8443
return host, port
def _handle_vxlan_config(config):
if config and 'vxlan-fdb' in config:
fdb = config['vxlan-fdb']
if 'name' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:name" section')
if 'records' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:records" section')
if config and 'vxlan-arp' in config:
arp = config['vxlan-arp']
if 'arps' not in arp:
raise ConfigError('Configuration file missing '
'"vxlan-arp:arps" section')
def _set_user_agent(prefix):
    """Build the user-agent string from the baked-in version/build file,
    falling back to a VERSION-UNKNOWN tag when it cannot be read."""
    version_path = '/app/vendor/src/f5/VERSION_BUILD.json'
    try:
        with open(version_path, 'r') as version_file:
            info = json.load(version_file)
        user_agent = '{}-bigip-ctlr-{}-{}'.format(
            prefix, info['version'], info['build'])
    except Exception as e:
        user_agent = prefix + "-bigip-ctlr-VERSION-UNKNOWN"
        log.error("Could not read version file: %s", e)

    return user_agent
def _retry_backoff(cb):
RETRY_INTERVAL = 1
log_interval = 0.5
elapsed = 0.5
log_success = False
while 1:
if log_interval > 0.5:
log_success = True
(success, val) = cb(log_success)
if success:
return val
if elapsed == log_interval:
elapsed = 0
log_interval *= 2
log.error("Encountered error: {}. Retrying for {} seconds.".format(
val, int(log_interval)
))
time.sleep(RETRY_INTERVAL)
elapsed += RETRY_INTERVAL
def _find_net_schema():
    """Locate the CCCL network schema file.

    Searches every site-packages directory on sys.path first, then the
    vendored /app/src/f5-cccl tree.  Returns the full path, or '' when
    the schema cannot be found.
    """
    for site_dir in (p for p in sys.path if 'site-packages' in p):
        for root, _dirs, files in os.walk(site_dir):
            if NET_SCHEMA_NAME in files:
                return os.path.join(root, NET_SCHEMA_NAME)
    for root, _dirs, files in os.walk('/app/src/f5-cccl'):
        if NET_SCHEMA_NAME in files:
            return os.path.join(root, NET_SCHEMA_NAME)
    log.info('Could not find CCCL schema: {}'.format(NET_SCHEMA_NAME))
    return ''
def main():
    """Entry point: parse arguments and configuration, connect to the
    BIG-IP (with retry/backoff), build the partition managers and run the
    config watch loop until shutdown.

    Returns 0 on success; exits with status 1 on configuration errors or
    unexpected exceptions.
    """
    try:
        args = _handle_args()

        config = _parse_config(args.config_file)
        verify_interval, _, vxlan_partition = _handle_global_config(config)
        host, port = _handle_bigip_config(config)

        # FIXME (kenr): Big-IP settings are currently static (we ignore any
        #               changes to these fields in subsequent updates). We
        #               may want to make the changes dynamic in the future.

        # BIG-IP to manage
        def _bigip_connect_cb(log_success):
            try:
                bigip = mgmt_root(
                    host,
                    config['bigip']['username'],
                    config['bigip']['password'],
                    port,
                    "tmos")
                if log_success:
                    log.info('BIG-IP connection established.')
                return (True, bigip)
            # BUGFIX: was the Python-2-only "except Exception, e" form;
            # "as e" is valid on Python 2.6+ and Python 3.
            except Exception as e:
                return (False, 'BIG-IP connection error: {}'.format(e))
        bigip = _retry_backoff(_bigip_connect_cb)

        # Read version and build info, set user-agent for ICR session
        user_agent = _set_user_agent(args.ctlr_prefix)

        managers = []
        for partition in config['bigip']['partitions']:
            # Management for the BIG-IP partitions
            manager = CloudServiceManager(
                bigip,
                partition,
                user_agent=user_agent)
            managers.append(manager)
        if vxlan_partition:
            # Management for net resources (VXLAN)
            manager = CloudServiceManager(
                bigip,
                vxlan_partition,
                user_agent=user_agent,
                prefix=args.ctlr_prefix,
                schema_path=_find_net_schema())
            managers.append(manager)

        handler = ConfigHandler(args.config_file,
                                managers,
                                verify_interval)

        # Process an already-present config once before watching for changes.
        if os.path.exists(args.config_file):
            handler.notify_reset()

        watcher = ConfigWatcher(args.config_file, handler.notify_reset)
        watcher.loop()
        handler.stop()
    except (IOError, ValueError, ConfigError) as e:
        # Expected configuration problems: report and exit non-zero.
        log.error(e)
        sys.exit(1)
    except Exception:
        log.exception('Unexpected error')
        sys.exit(1)

    return 0
if __name__ == "__main__":
    # Run the controller when invoked as a script.
    main()
|
mumbleBot.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble_py3 as pymumble
import pymumble_py3.constants
import variables as var
import logging
import logging.handlers
import traceback
import struct
from packaging import version
import util
import command
import constants
import media.playlist
from constants import tr_cli as tr
from database import SettingsDatabase, MusicDatabase, DatabaseMigration
from media.item import ValidationFailedError, PreparationFailedError
from media.cache import MusicCache
class MumbleBot:
    """Mumble music bot: connects to a Mumble server, decodes media with
    ffmpeg and streams the PCM into the channel, driven by chat commands."""

    # Overwritten for releases; 'git' marks a development snapshot.
    version = 'git'
    def __init__(self, args):
        """Connect to the Mumble server (command-line *args* override the
        configuration file) and initialise playback, volume/ducking state
        and event callbacks."""
        self.log = logging.getLogger("bot")
        self.log.info(f"bot: botamusique version {self.get_version()}, starting...")
        signal.signal(signal.SIGINT, self.ctrl_caught)
        self.cmd_handle = {}

        self.stereo = var.config.getboolean('bot', 'stereo', fallback=True)

        if args.channel:
            self.channel = args.channel
        else:
            self.channel = var.config.get("server", "channel", fallback=None)

        var.user = args.user
        var.is_proxified = var.config.getboolean(
            "webinterface", "is_web_proxified")

        # Flags to indicate the bot is exiting (Ctrl-C, or !kill)
        self.exit = False
        self.nb_exit = 0

        # Related to ffmpeg thread
        self.thread = None
        self.thread_stderr = None
        self.read_pcm_size = 0
        self.pcm_buffer_size = 0
        self.last_ffmpeg_err = ""

        # Play/pause status
        self.is_pause = False
        self.pause_at_id = ""
        self.playhead = -1  # current position in a song.
        self.song_start_at = -1
        self.wait_for_ready = False  # flag for the loop are waiting for download to complete in the other thread

        #
        self.on_interrupting = False

        if args.host:
            host = args.host
        else:
            host = var.config.get("server", "host")
        if args.port:
            port = args.port
        else:
            port = var.config.getint("server", "port")
        if args.password:
            password = args.password
        else:
            password = var.config.get("server", "password")
        # NOTE(review): duplicates the channel assignment above, but without
        # the fallback=None -- confirm which of the two is intended.
        if args.channel:
            self.channel = args.channel
        else:
            self.channel = var.config.get("server", "channel")
        if args.certificate:
            certificate = args.certificate
        else:
            certificate = util.solve_filepath(var.config.get("server", "certificate"))
        if args.tokens:
            tokens = args.tokens
        else:
            tokens = var.config.get("server", "tokens")
            tokens = tokens.split(',')
        if args.user:
            self.username = args.user
        else:
            self.username = var.config.get("bot", "username")
        if args.bandwidth:
            self.bandwidth = args.bandwidth
        else:
            self.bandwidth = var.config.getint("bot", "bandwidth")

        self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
                                      stereo=self.stereo,
                                      debug=var.config.getboolean('debug', 'mumbleConnection'),
                                      certfile=certificate)
        self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)

        self.mumble.set_codec_profile("audio")
        self.mumble.start()  # start the mumble thread
        self.mumble.is_ready()  # wait for the connection

        if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED:
            exit()

        self.set_comment()
        self.set_avatar()
        self.mumble.users.myself.unmute()  # by sure the user is not muted
        self.join_channel()
        self.mumble.set_bandwidth(self.bandwidth)
        self._user_in_channel = self.get_user_count_in_channel()

        # ====== Volume ======
        self.volume_helper = util.VolumeHelper()

        _volume = var.config.getfloat('bot', 'volume', fallback=0.8)
        # database value (set at runtime via commands) wins over config file
        if var.db.has_option('bot', 'volume'):
            _volume = var.db.getfloat('bot', 'volume')
        self.volume_helper.set_volume(_volume)

        self.is_ducking = False
        self.on_ducking = False
        self.ducking_release = time.time()
        self.last_volume_cycle_time = time.time()

        self._ducking_volume = 0
        _ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.50)
        _ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume)
        self.volume_helper.set_ducking_volume(_ducking_volume)

        self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
        self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)

        if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \
                or var.config.getboolean("bot", "ducking"):
            self.is_ducking = True
            self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
                                               self.ducking_sound_received)
            self.mumble.set_receive_sound(True)

        assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \
            "Unknown action for when_nobody_in_channel"

        if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']:
            # react to join/leave events in a separate thread so the
            # pymumble callback thread is never blocked
            user_change_callback = \
                lambda user, action: threading.Thread(target=self.users_changed,
                                                      args=(user, action), daemon=True).start()
            self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback)
            self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback)

        # Debug use
        self._loop_status = 'Idle'
        self._display_rms = False
        self._max_rms = 0

        self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True)

        if var.config.getboolean("bot", "auto_check_update"):
            def check_update():
                nonlocal self
                new_version, changelog = util.check_update(self.get_version())
                if new_version:
                    self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog))
            th = threading.Thread(target=check_update, name="UpdateThread")
            th.daemon = True
            th.start()

        last_startup_version = var.db.get("bot", "version", fallback=None)
        if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version):
            var.db.set("bot", "version", self.version)
            if var.config.getboolean("bot", "auto_check_update"):
                changelog = util.fetch_changelog()
                self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog))
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
self.exit = True
def get_version(self):
if self.version != "git":
return self.version
else:
return util.get_snapshot_version()
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel,
'admin': admin}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def set_avatar(self):
avatar_path = var.config.get('bot', 'avatar', fallback=None)
if avatar_path:
with open(avatar_path, 'rb') as avatar_file:
self.mumble.users.myself.texture(avatar_file.read())
else:
self.mumble.users.myself.texture(b'')
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
    # =======================
    # Message
    # =======================

    # All text send to the chat is analysed by this function
    def message_received(self, text):
        """Chat callback: parse the message, resolve the command (exact or
        partial match) and dispatch it to the registered handler after
        ban/permission/channel checks."""
        raw_message = text.message.strip()
        message = re.sub(r'<.*?>', '', raw_message)  # strip html tags

        if text.actor == 0:
            # Some server will send a welcome message to the bot once connected.
            # It doesn't have a valid "actor". Simply ignore it here.
            return

        user = self.mumble.users[text.actor]['name']

        if var.config.getboolean('commands', 'split_username_at_space'):
            # in can you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
            # you want to split the username
            user = user.split()[0]

        command_symbols = var.config.get('commands', 'command_symbol')
        match = re.match(fr'^[{re.escape(command_symbols)}](?P<command>\S+)(?:\s(?P<argument>.*))?', message)

        if match:
            command = match.group("command").lower()
            argument = match.group("argument") or ""

            if not command:
                return

            self.log.info(f'bot: received command "{command}" with arguments "{argument}" from {user}')

            # Anti stupid guy function
            if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
                self.mumble.users[text.actor].send_text_message(
                    tr('pm_not_allowed'))
                return

            # banned users get a reply and nothing else
            for i in var.db.items("user_ban"):
                if user.lower() == i[0]:
                    self.mumble.users[text.actor].send_text_message(
                        tr('user_ban'))
                    return

            # banned URLs are rejected for non-admins
            if not self.is_admin(user) and argument:
                input_url = util.get_url_from_input(argument)
                if input_url and var.db.has_option('url_ban', input_url):
                    self.mumble.users[text.actor].send_text_message(
                        tr('url_ban'))
                    return

            command_exc = ""
            try:
                if command in self.cmd_handle:
                    command_exc = command
                else:
                    # try partial match
                    cmds = self.cmd_handle.keys()
                    matches = []
                    for cmd in cmds:
                        if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
                            matches.append(cmd)

                    if len(matches) == 1:
                        self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
                        command_exc = matches[0]
                    elif len(matches) > 1:
                        # ambiguous prefix: ask the user which one they meant
                        self.mumble.users[text.actor].send_text_message(
                            tr('which_command', commands="<br>".join(matches)))
                        return
                    else:
                        self.mumble.users[text.actor].send_text_message(
                            tr('bad_command', command=command))
                        return

                if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user):
                    self.mumble.users[text.actor].send_text_message(tr('not_admin'))
                    return

                if not self.cmd_handle[command_exc]['access_outside_channel'] \
                        and not self.is_admin(user) \
                        and not var.config.getboolean('bot', 'allow_other_channel_message') \
                        and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
                    self.mumble.users[text.actor].send_text_message(
                        tr('not_in_my_channel'))
                    return

                self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, argument)
            except:
                # report the failure to the user instead of killing the
                # pymumble callback thread
                error_traceback = traceback.format_exc()
                error = error_traceback.rstrip().split("\n")[-1]
                self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n")
                self.send_msg(tr('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
# text if the object message, contain information if direct message or channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Other Mumble Events
# =======================
def get_user_count_in_channel(self):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
return len(own_channel.get_users())
def users_changed(self, user, message):
# only check if there is one more user currently in the channel
# else when the music is paused and somebody joins, music would start playing again
user_count = self.get_user_count_in_channel()
if user_count > self._user_in_channel and user_count == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause" and self.is_pause:
self.send_channel_msg(tr("auto_paused"))
elif user_count == 1 and len(var.playlist) != 0:
# if the bot is the only user left in the channel and the playlist isn't empty
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.log.info('bot: No user in my channel. Stop music now.')
self.clear()
else:
self.log.info('bot: No user in my channel. Pause music now.')
self.pause()
self._user_in_channel = user_count
    # =======================
    # Launch and Download
    # =======================

    def launch_music(self, music_wrapper, start_from=0):
        """Spawn an ffmpeg subprocess decoding *music_wrapper* to raw
        signed-16-bit/48kHz PCM on stdout, seeking *start_from* seconds in.
        The item must already be downloaded (is_ready)."""
        assert music_wrapper.is_ready()

        uri = music_wrapper.uri()

        self.log.info("bot: play music " + music_wrapper.format_debug_string())

        if var.config.getboolean('bot', 'announce_current_music'):
            self.send_channel_msg(music_wrapper.format_current_playing())

        if var.config.getboolean('debug', 'ffmpeg'):
            ffmpeg_debug = "debug"
        else:
            ffmpeg_debug = "warning"

        channels = 2 if self.stereo else 1
        # bytes consumed per read from ffmpeg stdout (see the main loop)
        self.pcm_buffer_size = 960 * channels

        command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
                   uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-')
        self.log.debug("bot: execute ffmpeg command: " + " ".join(command))

        # The ffmpeg process is a thread
        # prepare pipe for catching stderr of ffmpeg
        if self.redirect_ffmpeg_log:
            pipe_rd, pipe_wd = util.pipe_no_wait()  # Let the pipe work in non-blocking mode
            self.thread_stderr = os.fdopen(pipe_rd)
        else:
            pipe_rd, pipe_wd = None, None
        self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size)
def async_download_next(self):
# Function start if the next music isn't ready
# Do nothing in case the next music is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item():
# usually, all validation will be done when adding to the list.
# however, for performance consideration, youtube playlist won't be validate when added.
# the validation has to be done here.
next = var.playlist.next_item()
try:
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}")
th.daemon = True
th.start()
return th
def start_download(self, item):
if not item.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(item)
self.send_channel_msg(
tr('download_in_progress', item=item.format_title()))
def _download(self, item):
ver = item.version
try:
item.validate()
if item.is_ready():
return True
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(item.id)
var.cache.free_and_delete(item.id)
return False
try:
item.prepare()
if item.version > ver:
var.playlist.version += 1
return True
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
    # =======================
    # Loop
    # =======================

    # Main loop of the Bot
    def loop(self):
        """Main playback loop: stream ffmpeg PCM into Mumble, advance the
        playlist when a song ends, and drain the output buffer on exit."""
        while not self.exit and self.mumble.is_alive():

            while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
                # If the buffer isn't empty, I cannot send new music part, so I wait
                self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}'
                time.sleep(0.01)

            raw_music = None
            if self.thread:
                # I get raw from ffmpeg thread
                # move playhead forward
                self._loop_status = 'Reading raw'
                if self.song_start_at == -1:
                    self.song_start_at = time.time() - self.playhead
                self.playhead = time.time() - self.song_start_at

                raw_music = self.thread.stdout.read(self.pcm_buffer_size)
                self.read_pcm_size += len(raw_music)

                if self.redirect_ffmpeg_log:
                    try:
                        self.last_ffmpeg_err = self.thread_stderr.readline()
                        if self.last_ffmpeg_err:
                            self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
                    except:
                        pass

                if raw_music:
                    # Adjust the volume and send it to mumble
                    self.volume_cycle()

                    if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size:
                        self.mumble.sound_output.add_sound(
                            audioop.mul(raw_music, 2, self.volume_helper.real_volume))
                    elif self.read_pcm_size == 0:
                        # very first chunk of the song: fade in
                        self.mumble.sound_output.add_sound(
                            audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume))
                    elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size:
                        # short/interrupted chunk: fade out and kill ffmpeg
                        self.mumble.sound_output.add_sound(
                            audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume))
                        self.thread.kill()
                        self.thread = None
                        time.sleep(0.1)
                        self.on_interrupting = False
                else:
                    time.sleep(0.1)
            else:
                time.sleep(0.1)

            if not self.is_pause and not raw_music:
                self.thread = None
                # bot is not paused, but ffmpeg thread has gone.
                # indicate that last song has finished, or the bot just resumed from pause, or something is wrong.
                if self.read_pcm_size < self.pcm_buffer_size \
                        and var.playlist.current_index != -1 \
                        and self.last_ffmpeg_err:
                    current = var.playlist.current_item()
                    self.log.error("bot: cannot play music %s", current.format_debug_string())
                    self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
                    self.last_ffmpeg_err = ""

                    self.send_channel_msg(tr('unable_play', item=current.format_title()))
                    var.playlist.remove_by_id(current.id)
                    var.cache.free_and_delete(current.id)

                # move to the next song.
                if not self.wait_for_ready:  # if wait_for_ready flag is not true, move to the next song.
                    if var.playlist.next():
                        current = var.playlist.current_item()
                        self.log.debug(f"bot: next into the song: {current.format_debug_string()}")
                        try:
                            self.start_download(current)
                            self.wait_for_ready = True

                            self.song_start_at = -1
                            self.playhead = 0

                        except ValidationFailedError as e:
                            self.send_channel_msg(e.msg)
                            var.playlist.remove_by_id(current.id)
                            var.cache.free_and_delete(current.id)
                    else:
                        self._loop_status = 'Empty queue'
                else:
                    # if wait_for_ready flag is true, means the pointer is already
                    # pointing to target song. start playing
                    current = var.playlist.current_item()
                    if current:
                        if current.is_ready():
                            self.wait_for_ready = False
                            self.read_pcm_size = 0

                            self.launch_music(current, self.playhead)
                            self.last_volume_cycle_time = time.time()
                            self.async_download_next()
                        elif current.is_failed():
                            var.playlist.remove_by_id(current.id)
                            self.wait_for_ready = False
                        else:
                            self._loop_status = 'Wait for the next item to be ready'
                    else:
                        self.wait_for_ready = False

        while self.mumble.sound_output.get_buffer_size() > 0 and self.mumble.is_alive():
            # Empty the buffer before exit
            time.sleep(0.01)
        time.sleep(0.5)

        if self.exit:
            self._loop_status = "exited"
            if var.config.getboolean('bot', 'save_playlist', fallback=True) \
                    and var.config.get("bot", "save_music_library", fallback=True):
                self.log.info("bot: save playlist into database")
                var.playlist.save()
def volume_cycle(self):
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume_helper.real_volume = \
(self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \
+ self.volume_helper.ducking_volume_set
else:
self.volume_helper.real_volume = self.volume_helper.volume_set - \
(self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
def ducking_sound_received(self, user, sound):
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200)
+ '+' * int((rms - self.ducking_threshold) / 200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
def _fadeout(self, _pcm_data, stereo=False, fadein=False):
pcm_data = bytearray(_pcm_data)
if stereo:
if not fadein:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 4))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))]
for i in range(int(len(pcm_data) / 4)):
pcm_data[4 * i:4 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i]))
pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round(
struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i]))
else:
if not fadein:
mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 2))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 2)))]
for i in range(int(len(pcm_data) / 2)):
pcm_data[2 * i:2 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i]))
return bytes(pcm_data) + bytes(len(pcm_data))
    # =======================
    # Play Control
    # =======================

    def play(self, index=-1, start_at=0):
        """Start playback, optionally jumping to playlist *index* and
        seeking *start_at* seconds into the song."""
        if not self.is_pause:
            self.interrupt()

        if index != -1:
            var.playlist.point_to(index)

        current = var.playlist.current_item()
        self.start_download(current)

        self.is_pause = False
        self.wait_for_ready = True  # the loop launches ffmpeg once the item is ready
        self.song_start_at = -1
        self.playhead = start_at
    def clear(self):
        """Stop playback and empty the playlist."""
        # Kill the ffmpeg thread and empty the playlist
        self.interrupt()
        var.playlist.clear()
        self.wait_for_ready = False
        self.log.info("bot: music stopped. playlist trashed.")
    def stop(self):
        """Stop playback but keep the playlist, so a later play resumes
        from the current item."""
        self.interrupt()
        self.is_pause = True
        if len(var.playlist) > 0:
            self.wait_for_ready = True
        else:
            self.wait_for_ready = False
        self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.on_interrupting = True
time.sleep(0.1)
self.song_start_at = -1
self.read_pcm_size = 0
    def pause(self):
        """Pause playback, remembering which item (and playhead position)
        to resume at."""
        # Kill the ffmpeg thread
        self.interrupt()
        self.is_pause = True
        self.song_start_at = -1
        if len(var.playlist) > 0:
            self.pause_at_id = var.playlist.current_item().id
            self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.")
    def resume(self):
        """Resume playback from where pause() left off; restart from the
        beginning when the paused item is gone or not ready anymore."""
        self.is_pause = False

        if var.playlist.current_index == -1:
            var.playlist.next()
            self.playhead = 0
            return

        music_wrapper = var.playlist.current_item()

        if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
            self.playhead = 0
            return

        self.wait_for_ready = True
        self.pause_at_id = ""
def start_web_interface(addr, port):
    """Configure werkzeug logging and run the Flask web interface.

    Blocking call -- meant to be started in its own thread.
    (Removed a stray 'global formatter' declaration; the name was never
    used in this function.)
    """
    import interface

    # setup logger
    werkzeug_logger = logging.getLogger('werkzeug')
    logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
    if logfile:
        handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240)  # Rotate after 10KB
    else:
        handler = logging.StreamHandler()
    werkzeug_logger.addHandler(handler)

    interface.init_proxy()
    interface.web.env = 'development'
    interface.web.secret_key = var.config.get('webinterface', 'flask_secret')
    interface.web.run(port=port, host=addr)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments, load configuration,
    # databases and translations, then build the bot and run its loop.
    supported_languages = util.get_supported_language()

    parser = argparse.ArgumentParser(
        description='Bot for playing music on Mumble')

    # General arguments
    parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
                        help='Load configuration from this file. Default: configuration.ini')
    parser.add_argument("--db", dest='db', type=str,
                        default=None, help='Settings database file')
    parser.add_argument("--music-db", dest='music_db', type=str,
                        default=None, help='Music library database file')
    parser.add_argument("--lang", dest='lang', type=str, default=None,
                        help='Preferred language. Support ' + ", ".join(supported_languages))

    parser.add_argument("-q", "--quiet", dest="quiet",
                        action="store_true", help="Only Error logs")
    parser.add_argument("-v", "--verbose", dest="verbose",
                        action="store_true", help="Show debug log")

    # Mumble arguments
    parser.add_argument("-s", "--server", dest="host",
                        type=str, help="Hostname of the Mumble server")
    parser.add_argument("-u", "--user", dest="user",
                        type=str, help="Username for the bot")
    parser.add_argument("-P", "--password", dest="password",
                        type=str, help="Server password, if required")
    parser.add_argument("-T", "--tokens", dest="tokens",
                        type=str, help="Server tokens to enter a channel, if required (multiple entries separated with comma ','")
    parser.add_argument("-p", "--port", dest="port",
                        type=int, help="Port for the Mumble server")
    parser.add_argument("-c", "--channel", dest="channel",
                        type=str, help="Default channel for the bot")
    parser.add_argument("-C", "--cert", dest="certificate",
                        type=str, default=None, help="Certificate file")
    parser.add_argument("-b", "--bandwidth", dest="bandwidth",
                        type=int, help="Bandwidth used by the bot")

    args = parser.parse_args()

    # ======================
    # Load Config
    # ======================

    config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
    var.config = config
    # defaults first; the user's file overrides them
    parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
                                 encoding='utf-8')

    if len(parsed_configs) == 0:
        logging.error('Could not read configuration from file \"{}\"'.format(args.config))
        sys.exit()

    # ======================
    # Setup Logger
    # ======================

    bot_logger = logging.getLogger("bot")
    bot_logger.setLevel(logging.INFO)

    if args.verbose:
        bot_logger.setLevel(logging.DEBUG)
        bot_logger.debug("Starting in DEBUG loglevel")
    elif args.quiet:
        bot_logger.setLevel(logging.ERROR)
        bot_logger.error("Starting in ERROR loglevel")

    logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip())
    handler = None
    if logfile:
        print(f"Redirecting stdout and stderr to log file: {logfile}")
        handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240)  # Rotate after 10KB
        if var.config.getboolean("bot", "redirect_stderr", fallback=False):
            sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO,
                                              fallback_io_buffer=sys.stderr.buffer)
    else:
        handler = logging.StreamHandler()

    util.set_logging_formatter(handler, bot_logger.level)
    bot_logger.addHandler(handler)
    logging.getLogger("root").addHandler(handler)
    var.bot_logger = bot_logger

    # ======================
    # Load Database
    # ======================

    if args.user:
        username = args.user
    else:
        username = var.config.get("bot", "username")

    # settings DB name is derived from the (sanitized) bot username
    sanitized_username = "".join([x if x.isalnum() else "_" for x in username])
    var.settings_db_path = args.db if args.db is not None else util.solve_filepath(
        config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db"))
    var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath(
        config.get("bot", "music_database_path", fallback="music.db"))

    var.db = SettingsDatabase(var.settings_db_path)

    if var.config.get("bot", "save_music_library", fallback=True):
        var.music_db = MusicDatabase(var.music_db_path)
    else:
        var.music_db = MusicDatabase(":memory:")

    DatabaseMigration(var.db, var.music_db).migrate()

    var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
    if not var.music_folder.endswith(os.sep):
        # The file searching logic assumes that the music folder ends in a /
        var.music_folder = var.music_folder + os.sep
    var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))

    # ======================
    # Translation
    # ======================

    lang = ""
    if args.lang:
        lang = args.lang
    else:
        lang = var.config.get('bot', 'language', fallback='en_US')

    if lang not in supported_languages:
        raise KeyError(f"Unsupported language {lang}")
    var.language = lang
    constants.load_lang(lang)

    # ======================
    # Prepare Cache
    # ======================
    var.cache = MusicCache(var.music_db)

    if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True):
        var.cache.build_dir_cache()

    # ======================
    # Load playback mode
    # ======================
    playback_mode = None
    if var.db.has_option("playlist", "playback_mode"):
        playback_mode = var.db.get('playlist', 'playback_mode')
    else:
        playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")

    if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
        var.playlist = media.playlist.get_playlist(playback_mode)
    else:
        raise KeyError(f"Unknown playback mode '{playback_mode}'")

    # ======================
    # Create bot instance
    # ======================
    var.bot = MumbleBot(args)
    command.register_all_commands(var.bot)

    # load playlist
    if var.config.getboolean('bot', 'save_playlist', fallback=True):
        var.bot_logger.info("bot: load playlist from previous session")
        var.playlist.load()

    # ============================
    # Start the web interface
    # ============================
    if var.config.getboolean("webinterface", "enabled"):
        wi_addr = var.config.get("webinterface", "listening_addr")
        wi_port = var.config.getint("webinterface", "listening_port")
        tt = threading.Thread(
            target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
        tt.daemon = True
        bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
        tt.start()

    # Start the main loop.
    var.bot.loop()
|
DrawableMesh.py | import pythreejs as three
import numpy as np
from time import time, sleep
from .Colors import colors
from ..utils import Observer, ColorMap
import threading
import copy
class DrawableMesh(Observer):
    """Render a mesh geometry as pythreejs objects (a shaded mesh plus a
    wireframe overlay) and keep them in sync with geometry changes.

    When ``reactive`` is True the instance registers itself as an observer
    of the geometry and refreshes the GPU buffers on a background thread
    whenever the geometry notifies an update.
    """

    def __init__(self, geometry, mesh_color=None, reactive=False):
        """
        Parameters:
            geometry:   mesh-like object; must expose the ``_as_threejs_*``
                        conversion helpers used below.
            mesh_color: optional explicit per-triangle-vertex color array;
                        when None a default external/internal palette is built.
            reactive:   if True, attach to the geometry's observer list so
                        edits trigger a redraw.
        """
        super(DrawableMesh, self).__init__()
        self._external_color = colors.teal
        self._internal_color = colors.orange
        self._color_map = None
        self._metric_string = None
        self._c_map_string = None
        self._label_colors = None
        self.geometry = geometry
        if reactive:
            self.geometry.attach(self)
        self.geometry_color = self.__initialize_geometry_color(mesh_color)
        self.mesh = self.__initialize_mesh()
        self.wireframe = self.__initialize_wireframe()
        self.threejs_items = [self.mesh, self.wireframe]
        # Flags coordinating the background redraw thread (see update()/run()).
        self.updating = False
        self.queue = False

    def __initialize_geometry_color(self, mesh_color, geometry=None):
        """Build and return the per-triangle-vertex color array.

        BUGFIX: the original implementation only returned a value when
        ``mesh_color`` was None, so passing an explicit color left
        ``self.geometry_color`` set to None and broke every later update.
        """
        if geometry is None:
            geometry = self.geometry
        if mesh_color is None:
            color = np.repeat(self._external_color.reshape(1, 3),
                              geometry.num_triangles * 3, axis=0)
            if hasattr(self.geometry, "internals"):
                internal_color = geometry.internal_triangles_idx()
                color[internal_color] = self._internal_color
            return color
        # Assumes the caller supplies an array already shaped per
        # triangle-vertex, i.e. (num_triangles * 3, 3) -- TODO confirm.
        return np.asarray(mesh_color)

    def _push_colors(self, geometry):
        # Rebuild the interleaved (position, color, normal) triangle soup and
        # push it into the existing three.js buffer so the view refreshes.
        vertex_map = geometry._as_threejs_colors()
        per_vertex_colors = self.geometry_color[vertex_map]
        tris, vtx_normals = geometry._as_threejs_triangle_soup()
        interleaved = np.concatenate((tris, per_vertex_colors, vtx_normals), axis=1)
        self.mesh.geometry.attributes['color'].data.array = interleaved

    def update_wireframe_color(self, new_color):
        """Set the wireframe line color."""
        self.wireframe.material.color = new_color

    def update_wireframe_opacity(self, new_opacity):
        """Set the wireframe line opacity."""
        self.wireframe.material.opacity = new_opacity

    def update_internal_color(self, new_color, geometry=None):
        """Recolor internal triangles (when the geometry has any) and refresh."""
        if geometry is None:
            geometry = self.geometry
        self._internal_color = np.array(new_color)
        if hasattr(geometry, "internals"):
            internal_color = geometry.internal_triangles_idx()
            self.geometry_color[internal_color] = new_color
        self._push_colors(geometry)

    def update_external_color(self, new_color, geometry=None):
        """Recolor external triangles (or everything when there is no
        internal/external split) and refresh."""
        if geometry is None:
            geometry = self.geometry
        self._external_color = np.array(new_color)
        if hasattr(geometry, "internals"):
            internal_color = geometry.internal_triangles_idx()
            self.geometry_color[np.logical_not(internal_color)] = new_color
        else:
            self.geometry_color[:] = new_color
        self._push_colors(geometry)

    def update_color_map(self, new_colors, geometry=None):
        """Apply an explicit per-simplex color array and refresh."""
        if geometry is None:
            geometry = self.geometry
        self.geometry_color[:] = geometry._as_threejs_colors(colors=new_colors)
        self._push_colors(geometry)

    def compute_color_map(self, metric_string, c_map_string, geometry=None):
        """Color the mesh by a simplex metric through a named color map.

        When the metric declares no fixed (min, max) range the values are
        normalized over their observed span; otherwise they are clipped to
        the declared range first.
        """
        if geometry is None:
            geometry = self.geometry
        self._metric_string = metric_string
        self._c_map_string = c_map_string
        (min_range, max_range), metric = self.geometry.simplex_metrics[metric_string]
        c_map = ColorMap.color_maps[c_map_string]
        if min_range is None or max_range is None:
            min_range = np.min(metric)
            max_range = np.max(metric)
            if np.abs(max_range - min_range) > 1e-7:
                normalized_metric = ((metric - np.min(metric)) / np.ptp(metric)) * (c_map.shape[0] - 1)
            else:
                # Degenerate (constant) metric: map everything to its mean.
                normalized_metric = np.repeat(np.mean(metric), metric.shape[0])
        else:
            normalized_metric = np.clip(metric, min_range, max_range)
            normalized_metric = (normalized_metric - min_range) / (max_range - min_range) * (c_map.shape[0] - 1)
            # NOTE(review): the inversion only happens on the fixed-range
            # path, not on the auto-range path -- confirm this is intended.
            normalized_metric = 1 - normalized_metric
        # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
        metric_to_colormap = np.rint(normalized_metric).astype(int)
        mesh_color = c_map[metric_to_colormap]
        self._color_map = mesh_color
        self.update_color_map(mesh_color, geometry)

    def update_color_label(self, geometry=None):
        """Color the mesh from per-simplex labels via self._label_colors."""
        if geometry is None:
            geometry = self.geometry
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        mesh_color = np.zeros((self.geometry.labels.size, 3), dtype=float)
        for idx, i in enumerate(self.geometry.labels.reshape(-1)):
            mesh_color[idx] = self._label_colors[i]
        self._color_map = mesh_color
        self.update_color_map(mesh_color)

    def __initialize_wireframe(self):
        """Create the three.js line-segment object for the mesh edges."""
        edges_material = three.LineBasicMaterial(color='#686868',
                                                 linewidth=1,
                                                 depthTest=True,
                                                 opacity=.2,
                                                 transparent=True)
        wireframe = self.__get_wireframe_from_boundary()
        return three.LineSegments(wireframe, material=edges_material)

    def __get_drawable_from_boundary(self):
        """Build the interleaved BufferGeometry (position/color/normal)."""
        geometry_attributes = {}
        tris, vtx_normals = self.geometry._as_threejs_triangle_soup()
        new_colors = self.geometry_color[self.geometry._as_threejs_colors()].astype(np.float32)
        interleaved_array = np.concatenate((tris, new_colors, vtx_normals), axis=1)
        buffer = three.InterleavedBuffer(array=interleaved_array, stride=3)
        geometry_attributes['position'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, dynamic=True)
        geometry_attributes['color'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, offset=3, dynamic=True)
        geometry_attributes['normal'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, offset=6, dynamic=True)
        drawable_geometry = three.BufferGeometry(attributes=geometry_attributes)
        return drawable_geometry

    def __as_buffer_attr(self, array):
        """Wrap a numpy array as a dynamic three.js BufferAttribute."""
        return three.BufferAttribute(array, normalized=False, dynamic=True)

    def __get_wireframe_from_boundary(self):
        """Build the edge BufferGeometry, over-allocated 3x so topology
        changes can reuse the same buffer later (see run())."""
        edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
        buffer = np.empty((int(edges.shape[0] * 3), 3), dtype=np.float32).reshape(-1, 3)
        buffer[:edges.shape[0]] = edges
        vertices = self.__as_buffer_attr(buffer)
        wireframe = three.BufferGeometry(attributes={'position': vertices})
        wireframe.exec_three_obj_method("setDrawRange", 0, edges.shape[0])
        return wireframe

    def __initialize_mesh(self):
        """Create the shaded three.js mesh with per-face vertex colors."""
        drawable_geometry = self.__get_drawable_from_boundary()
        material = three.MeshLambertMaterial(
            polygonOffset=True,
            polygonOffsetFactor=1,
            polygonOffsetUnits=1,
            flatShading=True,
            color="white",
            opacity=1.,
            transparent=False,
            side='DoubleSide',
            wireframe=False,
            vertexColors='FaceColors',
        )
        return three.Mesh(
            geometry=drawable_geometry,
            material=material,
            position=[0, 0, 0]
        )

    def run(self, geometry):
        """Background worker: push the given geometry snapshot to the GPU
        buffers, then either stop or rerun immediately if another update
        was queued while this one was in flight."""
        edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
        self.wireframe.geometry.attributes['position'].array[:edges.shape[0]] = edges
        self.wireframe.geometry.exec_three_obj_method('setDrawRange', 0, edges.shape[0])
        # Self-assignment forces pythreejs to sync the mutated buffer.
        self.wireframe.geometry.attributes['position'].array = self.wireframe.geometry.attributes['position'].array
        self.geometry_color = self.__initialize_geometry_color(None, geometry)
        if self._color_map is None:
            self.update_internal_color(self._internal_color, geometry)
            self.update_external_color(self._external_color, geometry)
        elif self._label_colors is not None:
            self.update_color_label(geometry)
        else:
            self.compute_color_map(self._metric_string, self._c_map_string, geometry)
        if self.queue:
            self.queue = False
            self.updating = False
            self.update()
        else:
            self.updating = False

    def update(self):
        """Observer hook: schedule a redraw on a daemon thread, coalescing
        bursts of notifications into at most one queued rerun."""
        if not self.updating:
            self.updating = True
            thread = threading.Thread(target=self.run, args=(self.geometry.copy(),))
            thread.daemon = True
            thread.start()
        else:
            self.queue = True

    @property
    def center(self):
        """Geometry center (delegates to the wrapped geometry)."""
        return self.geometry.center
@property
def scale(self):
return self.geometry.scale |
sock.py | import json
import logging
import threading
from pajbot.managers.redis import RedisManager
log = logging.getLogger(__name__)
class SocketManager:
    """Background Redis pub/sub listener.

    Subscribes to per-streamer topics and forwards decoded JSON messages to
    registered handler callables via the supplied ``callback`` dispatcher,
    which is expected to run the handler on the bot's main thread.
    """

    def __init__(self, streamer_name, callback):
        self.streamer_name = streamer_name
        self.callback = callback
        self.handlers = {}
        self.running = True
        self.pubsub = RedisManager.get().pubsub()
        self.pubsub.subscribe("test")  # need this for keepalive? idk
        self.thread = threading.Thread(
            target=self.start, name="SocketManagerThread", daemon=True
        )
        self.thread.start()

    def quit(self):
        """Ask the listener loop to exit after its current poll."""
        self.running = False

    def add_handler(self, topic, method):
        """Register *method* for ``<streamer>:<topic>``, subscribing on first use."""
        full_topic = f"{self.streamer_name}:{topic}"
        existing = self.handlers.get(full_topic)
        if existing is None:
            self.handlers[full_topic] = [method]
            self.pubsub.subscribe(full_topic)
        else:
            existing.append(method)

    def start(self):
        """Listener loop: poll pub/sub, decode JSON, dispatch to handlers."""
        while self.running:
            message = self.pubsub.get_message(ignore_subscribe_messages=True, timeout=1)
            if message and message["channel"] in self.handlers:
                try:
                    payload = json.loads(message["data"])
                except json.decoder.JSONDecodeError:
                    log.exception("Bad JSON data on %s topic: '%s'", message["channel"], message["data"])
                else:
                    for handler in self.handlers[message["channel"]]:
                        # invokes the handler on the bot's main thread (the IRC event loop)
                        self.callback(handler, payload)
        self.pubsub.close()
class SocketClientManager:
    """Class-level helper for publishing messages onto a streamer's topics."""

    # Must be set via init() before send() may be used.
    streamer_name = None

    @classmethod
    def init(cls, streamer_name):
        """Record the streamer whose topic namespace send() will use."""
        cls.streamer_name = streamer_name

    @classmethod
    def send(cls, topic, data):
        """JSON-encode *data* and publish it on ``<streamer>:<topic>``."""
        if cls.streamer_name is None:
            raise ValueError("streamer_name not set in SocketClientManager")
        RedisManager.publish(f"{cls.streamer_name}:{topic}", json.dumps(data))
|
waterrowerinterface.py | # ---------------------------------------------------------------------------
# Original code from the bfritscher Repo waterrower
# https://github.com/bfritscher/waterrower
# ---------------------------------------------------------------------------
#
# -*- coding: utf-8 -*-
import threading
import logging
import time
import serial
import serial.tools.list_ports
# Module logger: DEBUG level, mirrored to both the console and "logs.log".
# NOTE(review): the FileHandler is created at import time -- importing this
# module creates/opens "logs.log" in the current working directory.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logHandler = logging.StreamHandler()
filelogHandler = logging.FileHandler("logs.log")
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logHandler.setFormatter(formatter)
filelogHandler.setFormatter(formatter)
logger.addHandler(filelogHandler)
logger.addHandler(logHandler)

# S4 monitor memory locations polled by the request loop.  Keys are hex
# addresses (as strings); 'size' selects the IRS/IRD/IRT read command and
# 'base' is the radix used to parse the reply value.  Entries flagged
# 'not_in_loop' are only read on demand (see Rower.request_info).
MEMORY_MAP = {'055': {'type': 'total_distance_m', 'size': 'double', 'base': 16},
              '140': {'type': 'total_strokes', 'size': 'double', 'base': 16},
              '088': {'type': 'watts', 'size': 'double', 'base': 16},
              '08A': {'type': 'total_kcal', 'size': 'triple', 'base': 16},
              '14A': {'type': 'avg_distance_cmps', 'size': 'double', 'base': 16},
              '148': {'type': 'total_speed_cmps', 'size': 'double', 'base': 16},
              '1E0': {'type': 'display_sec_dec', 'size': 'single', 'base': 10},
              '1E1': {'type': 'display_sec', 'size': 'single', 'base': 10},
              '1E2': {'type': 'display_min', 'size': 'single', 'base': 10},
              '1E3': {'type': 'display_hr', 'size': 'single', 'base': 10},
              # from zone math
              '1A0': {'type': 'heart_rate', 'size': 'double', 'base': 16},
              '1A6': {'type': '500mps', 'size': 'double', 'base': 16},
              '1A9': {'type': 'stroke_rate', 'size': 'single', 'base': 16},
              # explore
              '142': {'type': 'avg_time_stroke_whole', 'size': 'single', 'base': 16},
              '143': {'type': 'avg_time_stroke_pull', 'size': 'single', 'base': 16},
              # other
              '0A9': {'type': 'tank_volume', 'size': 'single', 'base': 16, 'not_in_loop': True},
              }

# ACH values = ASCII coded hexadecimal.
# REQUEST = sent from PC to device; RESPONSE = sent from device to PC.
USB_REQUEST = "USB"  # Application starting communications
WR_RESPONSE = "_WR_"  # Hardware Type, Accept USB start sending packets
EXIT_REQUEST = "EXIT"  # Application is exiting, stop sending packets
OK_RESPONSE = "OK"  # Packet Accepted
ERROR_RESPONSE = "ERROR"  # Unknown packet
PING_RESPONSE = "PING"  # Ping
RESET_REQUEST = "RESET"  # Request the rowing computer to reset, disable interactive mode
MODEL_INFORMATION_REQUEST = "IV?"  # Request Model Information
MODEL_INFORMATION_RESPONSE = "IV"  # Current model information IV+Model+Version High+Version Low
READ_MEMORY_REQUEST = "IR"  # Read a memory location IR+(S=Single,D=Double,T=Triple) + XXX
READ_MEMORY_RESPONSE = "ID"  # Value from a memory location ID +(type) + Y3 Y2 Y1
STROKE_START_RESPONSE = "SS"  # Start of stroke
STROKE_END_RESPONSE = "SE"  # End of stroke
PULSE_COUNT_RESPONSE = "P"  # Pulse Count XX in the last 25mS, ACH value

# Display Settings (not used by this driver)
DISPLAY_SET_INTENSITY_MPS_REQUEST = "DIMS"
DISPLAY_SET_INTENSITY_MPH_REQUEST = "DIMPH"
DISPLAY_SET_INTENSITY_500M_REQUEST = "DI500"
DISPLAY_SET_INTENSITY_2KM_REQUEST = "DI2KM"
DISPLAY_SET_INTENSITY_WATTS_REQUEST = "DIWA"
DISPLAY_SET_INTENSITY_CALHR_REQUEST = "DICH"
DISPLAY_SET_INTENSITY_AVG_MPS_REQUEST = "DAMS"
DISPLAY_SET_INTENSITY_AVG_MPH_REQUEST = "DAMPH"
DISPLAY_SET_INTENSITY_AVG_500M_REQUEST = "DA500"
DISPLAY_SET_INTENSITY_AVG_2KM_REQUEST = "DA2KM"
DISPLAY_SET_DISTANCE_METERS_REQUEST = "DDME"
DISPLAY_SET_DISTANCE_MILES_REQUEST = "DDMI"
DISPLAY_SET_DISTANCE_KM_REQUEST = "DDKM"
DISPLAY_SET_DISTANCE_STROKES_REQUEST = "DDST"

# Interactive mode (keypad redirection to the PC)
INTERACTIVE_MODE_START_RESPONSE = "AIS"  # interactive mode requested by device
INTERACTIVE_MODE_START_ACCEPT_REQUEST = "AIA"  # confirm interactive mode, key input is redirected to PC
INTERACTIVE_MODE_END_REQUEST = "AIE"  # cancel interactive mode
INTERACTIVE_KEYPAD_RESET_RESPONSE = "AKR"  # RESET key pressed, interactive mode will be cancelled
INTERACTIVE_KEYPAD_UNITS_RESPONSE = "AK1"  # Units button pressed
INTERACTIVE_KEYPAD_ZONES_RESPONSE = "AK2"  # Zones button pressed
INTERACTIVE_KEYPAD_WORKOUT_RESPONSE = "AK3"  # Workout button pressed
INTERACTIVE_KEYPAD_UP_RESPONSE = "AK4"  # Up arrow button pressed
INTERACTIVE_KEYPAD_OK_RESPONSE = "AK5"  # Ok button pressed
INTERACTIVE_KEYPAD_DOWN_RESPONSE = "AK6"  # Down arrow button pressed
INTERACTIVE_KEYPAD_ADVANCED_RESPONSE = "AK7"  # Advanced button pressed
INTERACTIVE_KEYPAD_STORED_RESPONSE = "AK8"  # Stored Programs button pressed
INTERACTIVE_KEYPAD_HOLD_RESPONSE = "AK9"  # Hold/cancel button pressed

# Workout programming commands
WORKOUT_SET_DISTANCE_REQUEST = "WSI"  # Define a distance workout + x(unit, 1-4) + YYYY = ACH
WORKOUT_SET_DURATION_REQUEST = "WSU"  # Define a duration workout + YYYY = ACH seconds
WORKOUT_INTERVAL_START_SET_DISTANCE_REQUEST = "WII"  # Define an interval distance workout
WORKOUT_INTERVAL_START_SET_DURATION_REQUEST = "WIU"  # Define an interval duration workout
WORKOUT_INTERVAL_ADD_END_REQUEST = "WIN"  # Add/End an interval to a workout XXXX(==FFFFF to end) + YYYY

# Distance units accepted by the workout commands
UNIT_METERS = 1
UNIT_MILES = 2
UNIT_KM = 3
UNIT_STROKES = 4

# Memory-read command prefix per value size.
SIZE_MAP = {'single': 'IRS',
            'double': 'IRD',
            'triple': 'IRT',}
UNIT_MAP = {'meters': 1,
            'miles': 2,
            'km': 3,
            'strokes': 4}
# Slices extracting the value digits from an 'ID...' reply, per size.
SIZE_PARSE_MAP = {'single': lambda cmd: cmd[6:8],
                  'double': lambda cmd: cmd[6:10],
                  'triple': lambda cmd: cmd[6:12]}
def find_port():
    """Poll the serial ports until a WaterRower ('WR') device appears.

    Blocks until a matching port is found, retrying every 5 seconds.

    BUGFIX: the original recursed on every retry, which grows the call
    stack without bound while the rower stays unplugged; this version
    loops instead, with identical observable behavior.
    """
    while True:
        ports = serial.tools.list_ports.comports()
        for (path, name, _) in ports:
            if "WR" in name:
                logger.info("port found: %s" % path)
                return path
        logger.warning("port not found retrying in 5s")
        time.sleep(5)
def build_daemon(target):
    """Create (but do not start) a daemon thread running *target*."""
    return threading.Thread(target=target, daemon=True)
def build_event(type, value=None, raw=None):
    """Assemble an event dict carrying a millisecond timestamp ('at')."""
    timestamp_ms = int(round(time.time() * 1000))
    return {'type': type, 'value': value, 'raw': raw, 'at': timestamp_ms}
def is_live_thread(t):
    """Return whether *t* is a running thread (passes falsy *t* through)."""
    return t.is_alive() if t else t
def read_reply(cmd):
    """Decode an 'ID...' memory-read response into an event dict.

    Returns None (after logging) for unknown addresses or sizes.
    """
    location = cmd[3:6]
    entry = MEMORY_MAP.get(location)
    if not entry:
        logger.error('cannot read reply for %s', cmd)
        return None
    size = entry['size']
    parse = SIZE_PARSE_MAP.get(size, lambda c: None)
    raw_value = parse(cmd)
    if raw_value is None:
        logger.error('unknown size: %s', size)
        return None
    return build_event(entry['type'], int(raw_value, base=entry['base']), cmd)
def event_from(line):
    """Decode one raw serial line from the monitor into an event dict.

    Returns None for lines that need no action (OK acks, unrecognized
    input) and logs instead of raising on malformed input.  The order of
    the checks matters: exact matches are tested before prefix matches.
    """
    try:
        cmd = line.strip()        # drop surrounding whitespace / CRLF
        cmd = cmd.decode('utf8')  # bytes -> str
        if cmd == STROKE_START_RESPONSE:                # "SS": a stroke began
            return build_event(type='stroke_start', raw=cmd)
        elif cmd == STROKE_END_RESPONSE:                # "SE": a stroke ended
            return build_event(type='stroke_end', raw=cmd)
        elif cmd == OK_RESPONSE:                        # "OK" ack: nothing to do
            return None
        elif cmd[:2] == MODEL_INFORMATION_RESPONSE:     # "IV...": model/version info
            return build_event(type='model', raw=cmd)
        elif cmd[:2] == READ_MEMORY_RESPONSE:           # "ID...": memory-read reply
            return read_reply(cmd)
        elif cmd[:4] == PING_RESPONSE:                  # "PING": idle keepalive
            return build_event(type='ping', raw=cmd)
        elif cmd[:1] == PULSE_COUNT_RESPONSE:           # "Pxx": paddle pulse count
            return build_event(type='pulse', raw=cmd)
        elif cmd == ERROR_RESPONSE:                     # device rejected a packet
            return build_event(type='error', raw=cmd)
        elif cmd[:2] == STROKE_START_RESPONSE:
            # "SS"-prefixed line that was not an exact "SS" match; only
            # printed for inspection -- falls through and returns None.
            print(cmd)
        else:
            return None
    except Exception as e:
        # Never let a bad line kill the capture loop; log and return None.
        logger.error('could not build event for: %s %s', line, e)
class Rower(object):
    """Driver for a WaterRower S4 monitor over a serial link.

    Spawns two daemon threads: one that polls the memory addresses in
    MEMORY_MAP (start_requesting) and one that reads and decodes the
    device's replies (start_capturing), dispatching decoded events to all
    registered callbacks.
    """
    def __init__(self, options=None):
        # NOTE(review): the worker threads start immediately, before the
        # serial port is opened; both loops idle until open() succeeds.
        self._callbacks = set()
        self._stop_event = threading.Event()
        self._demo = False
        # if options and options.demo:
        #     from demo import FakeS4
        #     self._serial = FakeS4()
        #     self._demo = True
        # else:
        self._serial = serial.Serial()
        self._serial.baudrate = 19200
        self._request_thread = build_daemon(target=self.start_requesting)
        self._capture_thread = build_daemon(target=self.start_capturing)
        self._request_thread.start()
        self._capture_thread.start()

    def is_connected(self):
        """True when the port is open and both worker threads are alive."""
        return self._serial.isOpen() and is_live_thread(self._request_thread) and \
            is_live_thread(self._capture_thread)

    def _find_serial(self):
        """Locate and open the rower's port.

        NOTE(review): retries by self-recursion every 5s; a very long
        outage grows the call stack without bound.
        """
        if not self._demo:
            self._serial.port = find_port()
        try:
            self._serial.open()
            #print("serial open")
            logger.info("serial open")
        except serial.SerialException as e:
            print("serial open error waiting")
            time.sleep(5)
            self._serial.close()
            self._find_serial()

    def open(self):
        """(Re)open the serial link, restart the workers if close() stopped
        them, and announce ourselves to the device with the USB handshake."""
        if self._serial and self._serial.isOpen():
            self._serial.close()
        self._find_serial()
        if self._stop_event.is_set():
            #print("reset threads")
            logger.info("reset threads")
            self._stop_event.clear()
            self._request_thread = build_daemon(target=self.start_requesting)
            self._capture_thread = build_daemon(target=self.start_capturing)
            self._request_thread.start()
            logger.info("Thread daemon _request started")
            self._capture_thread.start()
            logger.info("Thread daemon _capture started")
        self.write(USB_REQUEST)

    def close(self):
        """Notify callbacks with an 'exit' event, stop the worker loops and
        close the port after telling the device we are leaving."""
        self.notify_callbacks(build_event("exit"))
        if self._stop_event:
            self._stop_event.set()
        if self._serial and self._serial.isOpen():
            self.write(EXIT_REQUEST)
            time.sleep(0.1)  # time for capture and request loops to stop running
            self._serial.close()

    def write(self, raw):
        """Send one uppercased command line; on failure try to reconnect."""
        try:
            self._serial.write(str.encode(raw.upper() + '\r\n'))
            self._serial.flush()
        except Exception as e:
            print(e)
            #print("Serial error try to reconnect")
            logger.error("Serial error try to reconnect")
            # NOTE(review): open() may call write() again -- a persistent
            # fault can loop between write() and open().
            self.open()

    def start_capturing(self):
        """Worker loop: read lines from the device and dispatch events."""
        while not self._stop_event.is_set():
            if self._serial.isOpen():
                try:
                    line = self._serial.readline()
                    event = event_from(line)
                    if event:
                        self.notify_callbacks(event)
                except Exception as e:
                    #print("could not read %s" % e)
                    logger.error("could not read %s" % e)
                    try:
                        self._serial.reset_input_buffer()
                    except Exception as e2:
                        #print("could not reset_input_buffer %s" % e2)
                        logger.error("could not reset_input_buffer %s" % e2)
            else:
                self._stop_event.wait(0.1)

    def start_requesting(self):
        """Worker loop: poll every MEMORY_MAP address (except 'not_in_loop'
        entries), pacing requests ~25ms apart."""
        while not self._stop_event.is_set():
            if self._serial.isOpen():
                for address in MEMORY_MAP:
                    if 'not_in_loop' not in MEMORY_MAP[address]:
                        self.request_address(address)
                        self._stop_event.wait(0.025)
            else:
                self._stop_event.wait(0.1)

    def reset_request(self):
        """Ask the monitor to reset and emit a 'reset' event."""
        self.write(RESET_REQUEST)
        self.notify_callbacks(build_event('reset'))
        logger.info("Reset requested")

    def request_info(self):
        """Request model information plus the one-shot tank volume value."""
        self.write(MODEL_INFORMATION_REQUEST)
        self.request_address('0A9')

    def request_address(self, address):
        """Issue a sized memory read ('IRS'/'IRD'/'IRT') for one address."""
        size = MEMORY_MAP[address]['size']
        cmd = SIZE_MAP[size]
        self.write(cmd + address)

    def register_callback(self, cb):
        """Add *cb* to the set of event listeners."""
        self._callbacks.add(cb)

    def remove_callback(self, cb):
        """Remove a previously registered listener."""
        self._callbacks.remove(cb)

    def notify_callbacks(self, event):
        """Fan one decoded event out to every registered callback."""
        for cb in self._callbacks:
            cb(event)
|
api_image_test.py | import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from ..helpers import requires_api_version, requires_experimental
from .base import BaseAPIIntegrationTest, TEST_IMG
class ListImagesTest(BaseAPIIntegrationTest):
    """Integration tests for the image-listing endpoint."""

    def test_images(self):
        # Each listed image should carry Id/Created/RepoTags, and the number
        # of distinct ids should agree with the daemon's info() total.
        res1 = self.client.images(all=True)
        assert 'Id' in res1[0]
        res10 = res1[0]
        assert 'Created' in res10
        assert 'RepoTags' in res10
        distinct = []
        for img in res1:
            if img['Id'] not in distinct:
                distinct.append(img['Id'])
        assert len(distinct) == self.client.info()['Images']

    def test_images_quiet(self):
        # quiet=True returns only image ids, as text strings.
        res1 = self.client.images(quiet=True)
        assert type(res1[0]) == six.text_type
class PullImageTest(BaseAPIIntegrationTest):
    """Integration tests for pulling images from a registry."""

    def test_pull(self):
        # Remove the image first (ignoring "not present") so the pull is real.
        try:
            self.client.remove_image('hello-world')
        except docker.errors.APIError:
            pass
        res = self.client.pull('hello-world')
        self.tmp_imgs.append('hello-world')
        assert type(res) == six.text_type
        assert len(self.client.images('hello-world')) >= 1
        img_info = self.client.inspect_image('hello-world')
        assert 'Id' in img_info

    def test_pull_streaming(self):
        # Same as test_pull but consuming the decoded progress stream.
        try:
            self.client.remove_image('hello-world')
        except docker.errors.APIError:
            pass
        stream = self.client.pull(
            'hello-world', stream=True, decode=True)
        self.tmp_imgs.append('hello-world')
        for chunk in stream:
            assert isinstance(chunk, dict)
        assert len(self.client.images('hello-world')) >= 1
        img_info = self.client.inspect_image('hello-world')
        assert 'Id' in img_info

    @requires_api_version('1.32')
    @requires_experimental(until=None)
    def test_pull_invalid_platform(self):
        # A bogus platform string must be rejected by the daemon.
        with pytest.raises(docker.errors.APIError) as excinfo:
            self.client.pull('hello-world', platform='foobar')
        # Some API versions incorrectly returns 500 status; assert 4xx or 5xx
        assert excinfo.value.is_error()
        assert 'unknown operating system' in excinfo.exconly() \
            or 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
    """Integration tests for committing containers to images."""

    def test_commit(self):
        # Commit a fresh container and verify the resulting image's
        # metadata points back at the container and base image.
        container = self.client.create_container(TEST_IMG, ['touch', '/test'])
        id = container['Id']
        self.client.start(id)
        self.tmp_containers.append(id)
        res = self.client.commit(id)
        assert 'Id' in res
        img_id = res['Id']
        self.tmp_imgs.append(img_id)
        img = self.client.inspect_image(img_id)
        assert 'Container' in img
        assert img['Container'].startswith(id)
        assert 'ContainerConfig' in img
        assert 'Image' in img['ContainerConfig']
        assert TEST_IMG == img['ContainerConfig']['Image']
        busybox_id = self.client.inspect_image(TEST_IMG)['Id']
        assert 'Parent' in img
        assert img['Parent'] == busybox_id

    def test_commit_with_changes(self):
        # Dockerfile-style 'changes' passed at commit time must be applied
        # to the resulting image config.
        cid = self.client.create_container(TEST_IMG, ['touch', '/test'])
        self.tmp_containers.append(cid)
        self.client.start(cid)
        img_id = self.client.commit(
            cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
        )
        self.tmp_imgs.append(img_id)
        img = self.client.inspect_image(img_id)
        assert 'Container' in img
        assert img['Container'].startswith(cid['Id'])
        assert '8000/tcp' in img['Config']['ExposedPorts']
        assert img['Config']['Cmd'] == ['bash']
class RemoveImageTest(BaseAPIIntegrationTest):
    """Integration test for force-removing an image."""

    def test_remove(self):
        # Create an image by committing a container, then remove it and
        # verify it no longer appears in the image list.
        container = self.client.create_container(TEST_IMG, ['touch', '/test'])
        id = container['Id']
        self.client.start(id)
        self.tmp_containers.append(id)
        res = self.client.commit(id)
        assert 'Id' in res
        img_id = res['Id']
        self.tmp_imgs.append(img_id)
        logs = self.client.remove_image(img_id, force=True)
        assert {"Deleted": img_id} in logs
        images = self.client.images(all=True)
        res = [x for x in images if x['Id'].startswith(img_id)]
        assert len(res) == 0
class ImportImageTest(BaseAPIIntegrationTest):
    '''Base class for `docker import` test cases.'''

    TAR_SIZE = 512 * 1024

    def write_dummy_tar_content(self, n_bytes, tar_fd):
        """Write a tar archive containing one n_bytes 'testdata' member to
        the open (binary) file object *tar_fd*."""
        def extend_file(f, n_bytes):
            # Seek-and-write produces a (sparse) file of exactly n_bytes.
            f.seek(n_bytes - 1)
            f.write(bytearray([65]))
            f.seek(0)

        tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
        with tempfile.NamedTemporaryFile() as f:
            extend_file(f, n_bytes)
            tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
            tar.addfile(tarinfo, fileobj=f)
        tar.close()

    @contextlib.contextmanager
    def dummy_tar_stream(self, n_bytes):
        '''Yields a stream that is valid tar data of size n_bytes.'''
        with tempfile.NamedTemporaryFile() as tar_file:
            self.write_dummy_tar_content(n_bytes, tar_file)
            tar_file.seek(0)
            yield tar_file

    @contextlib.contextmanager
    def dummy_tar_file(self, n_bytes):
        '''Yields the name of a valid tar file of size n_bytes.'''
        with tempfile.NamedTemporaryFile(delete=False) as tar_file:
            self.write_dummy_tar_content(n_bytes, tar_file)
            tar_file.seek(0)
            yield tar_file.name

    def test_import_from_bytes(self):
        """Import an image from in-memory tar bytes."""
        with self.dummy_tar_stream(n_bytes=500) as f:
            content = f.read()

        # The generic import_image() function cannot import in-memory bytes
        # data that happens to be represented as a string type, because
        # import_image() will try to use it as a filename and usually then
        # trigger an exception. So we test the import_image_from_data()
        # function instead.
        statuses = self.client.import_image_from_data(
            content, repository='test/import-from-bytes')
        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)

    def test_import_from_file(self):
        """Import an image from a tar file on disk."""
        with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
            # statuses = self.client.import_image(
            #     src=tar_filename, repository='test/import-from-file')
            statuses = self.client.import_image_from_file(
                tar_filename, repository='test/import-from-file')
        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        assert 'status' in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)

    def test_import_from_stream(self):
        """Import an image from an open tar stream."""
        with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
            statuses = self.client.import_image(
                src=tar_stream, repository='test/import-from-stream')
            # statuses = self.client.import_image_from_stream(
            #     tar_stream, repository='test/import-from-stream')
        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        assert 'status' in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)

    def test_import_image_from_data_with_changes(self):
        """Dockerfile-style 'changes' applied during an in-memory import."""
        with self.dummy_tar_stream(n_bytes=500) as f:
            content = f.read()
        statuses = self.client.import_image_from_data(
            content, repository='test/import-from-bytes',
            changes=['USER foobar', 'CMD ["echo"]']
        )
        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)
        img_data = self.client.inspect_image(img_id)
        assert img_data is not None
        assert img_data['Config']['Cmd'] == ['echo']
        assert img_data['Config']['User'] == 'foobar'

    def test_import_image_with_changes(self):
        """Dockerfile-style 'changes' applied during a file import."""
        with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
            statuses = self.client.import_image(
                src=tar_filename, repository='test/import-from-file',
                changes=['USER foobar', 'CMD ["echo"]']
            )
        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)
        img_data = self.client.inspect_image(img_id)
        assert img_data is not None
        assert img_data['Config']['Cmd'] == ['echo']
        assert img_data['Config']['User'] == 'foobar'

    # Docs say output is available in 1.23, but this test fails on 1.12.0
    @requires_api_version('1.24')
    def test_get_load_image(self):
        """Round-trip: export an image tarball and load it back."""
        test_img = 'hello-world:latest'
        self.client.pull(test_img)
        data = self.client.get_image(test_img)
        assert data
        output = self.client.load_image(data)
        assert any([
            line for line in output
            if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
        ])

    @contextlib.contextmanager
    def temporary_http_file_server(self, stream):
        '''Serve data from an IO stream over HTTP.'''

        class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
            def do_GET(self):
                self.send_response(200)
                self.send_header('Content-Type', 'application/x-tar')
                self.end_headers()
                shutil.copyfileobj(stream, self.wfile)

        server = socketserver.TCPServer(('', 0), Handler)
        thread = threading.Thread(target=server.serve_forever)
        # BUGFIX: Thread.setDaemon() is deprecated (removed in Python 3.13);
        # assign the daemon attribute instead.
        thread.daemon = True
        thread.start()

        yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])

        server.shutdown()
        # BUGFIX: shutdown() only stops serve_forever(); close the listening
        # socket as well so it is not leaked.
        server.server_close()

    @pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
    def test_import_from_url(self):
        """Import an image from a URL served by a throwaway HTTP server."""
        # The crappy test HTTP server doesn't handle large files well, so use
        # a small file.
        tar_size = 10240

        with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
            with self.temporary_http_file_server(tar_data) as url:
                statuses = self.client.import_image(
                    src=url, repository='test/import-from-url')

        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)
        assert 'error' not in result
        assert 'status' in result
        img_id = result['status']
        self.tmp_imgs.append(img_id)
@requires_api_version('1.25')
class PruneImagesTest(BaseAPIIntegrationTest):
    """Integration tests for the image-pruning endpoint."""

    def test_prune_images(self):
        # A dangling-only prune must not delete an image that is in use,
        # while a full prune (dangling=False) must remove the unused one.
        try:
            self.client.remove_image('hello-world')
        except docker.errors.APIError:
            pass
        # Ensure busybox does not get pruned
        ctnr = self.client.create_container(TEST_IMG, ['sleep', '9999'])
        self.tmp_containers.append(ctnr)
        self.client.pull('hello-world', tag='latest')
        self.tmp_imgs.append('hello-world')
        img_id = self.client.inspect_image('hello-world')['Id']
        result = self.client.prune_images()
        assert img_id not in [
            img.get('Deleted') for img in result.get('ImagesDeleted') or []
        ]
        result = self.client.prune_images({'dangling': False})
        assert result['SpaceReclaimed'] > 0
        assert 'hello-world:latest' in [
            img.get('Untagged') for img in result['ImagesDeleted']
        ]
        assert img_id in [
            img.get('Deleted') for img in result['ImagesDeleted']
        ]
class SaveLoadImagesTest(BaseAPIIntegrationTest):
    """Round-trip test: save an image tarball to disk and load it back."""

    @requires_api_version('1.23')
    def test_get_image_load_image(self):
        with tempfile.TemporaryFile() as f:
            # Spool the exported image into a temp file, then feed the raw
            # bytes back through load_image and look for the success line.
            stream = self.client.get_image(TEST_IMG)
            for chunk in stream:
                f.write(chunk)
            f.seek(0)
            result = self.client.load_image(f.read())
            success = False
            result_line = 'Loaded image: {}\n'.format(TEST_IMG)
            for data in result:
                print(data)
                if 'stream' in data:
                    if data['stream'] == result_line:
                        success = True
                        break
            assert success is True
@requires_api_version('1.30')
class InspectDistributionTest(BaseAPIIntegrationTest):
    """Integration test for the registry distribution-inspect endpoint."""

    def test_inspect_distribution(self):
        # The registry manifest for busybox must advertise linux/amd64.
        data = self.client.inspect_distribution('busybox:latest')
        assert data is not None
        assert 'Platforms' in data
        assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
|
test_cloudpickle.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import multiprocessing
import os
import pickle
import subprocess
import sys
import tempfile
import textwrap
import traceback
import uuid
from odps.compat import six, unittest, PY27
from odps.lib.cloudpickle import loads, dumps
from odps.utils import to_binary
from odps.tests.core import TestBase, numpy_case
# if bytecode needed in debug, switch it on
DUMP_CODE = False
CROSS_VAR_PICKLE_CODE = """
import base64
import json
import sys
import platform
import os
import pickle
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
import_paths = json.loads(r\"\"\"
{import_paths}
\"\"\".strip())
sys.path.extend(import_paths)
from odps.lib.cloudpickle import dumps
from odps.utils import to_str
from {module_name} import {method_ref}
client_impl = (sys.version_info[0],
sys.version_info[1],
platform.python_implementation().lower())
result_obj = {method_ref}()
result_tuple = (
base64.b64encode(dumps(result_obj, dump_code={dump_code})),
client_impl,
)
with open(r'{pickled_file}', 'w') as f:
f.write(to_str(base64.b64encode(pickle.dumps(result_tuple, protocol=0))))
f.close()
""".replace('{module_name}', __name__).replace('{dump_code}', repr(DUMP_CODE))
def pickled_runner(q, pickled, args, kwargs, **kw):
    """Subprocess target: unpickle *pickled* and push the call result onto *q*.

    ``kw`` may carry a pickled ``wrapper`` callable controlling how the
    deserialized object is invoked; ``kwargs`` may carry the producing
    interpreter's ``impl`` triple.
    """
    try:
        raw_wrapper = kw.pop('wrapper', None)
        source_impl = kwargs.pop('impl', (3, 5, 'cpython'))
        if raw_wrapper:
            invoke = loads(raw_wrapper)
        else:
            def invoke(v, a, kw):
                return v(*a, **kw)
        restored = loads(base64.b64decode(pickled), impl=source_impl, dump_code=DUMP_CODE)
        q.put(invoke(restored, args, kwargs))
    except:
        # Print before re-raising so failures surface in the parent's output.
        traceback.print_exc()
        raise
def run_pickled(pickled, *args, **kwargs):
    """Decode *pickled* (base64 of a pickled ``(payload, impl)`` tuple) and
    execute the payload in a child process, returning whatever the child puts
    on the result queue."""
    payload, source_impl = pickle.loads(base64.b64decode(pickled))
    kwargs['impl'] = source_impl
    runner_kw = {}
    if 'wrapper' in kwargs:
        runner_kw['wrapper'] = dumps(kwargs.pop('wrapper'))
    result_queue = multiprocessing.Queue()
    child = multiprocessing.Process(target=pickled_runner,
                                    args=(result_queue, payload, args, kwargs),
                                    kwargs=runner_kw)
    child.start()
    child.join()
    return result_queue.get(timeout=5)
def _gen_nested_yield_obj():
out_closure = 10
class _NestClass(object):
inner_gain = 5
def __init__(self):
self._o_closure = out_closure
def nested_method(self, add_val):
if add_val < 5:
return self._o_closure + add_val * 2 + self.inner_gain
else:
return self._o_closure + add_val + self.inner_gain
class _FuncClass(object):
def __init__(self):
self.nest = _NestClass()
def __call__(self, add_val):
yield self.nest.nested_method(add_val)
return _FuncClass
def _gen_from_import_func():
def fun(val):
from numpy import sinh
return float(sinh(val))
return fun
class BuildMeta(type):
    """Marker metaclass for the dynamically built test classes below."""
    pass
class BuildBase(object):
    """Common base class for the dynamically built test classes below."""
    pass
# _gen_class_builder_func: fixture returning a factory for a class that uses
# both a custom metaclass and an enclosing-scope closure.  The py2 and py3
# metaclass declaration syntaxes are mutually incompatible at parse time, so
# the py3 variant is compiled from a string via exec.
if six.PY2:
    def _gen_class_builder_func():
        out_closure = 10
        def _gen_nested_class_obj():
            class BuildCls(BuildBase):
                __metaclass__ = BuildMeta
                a = out_closure
                def b(self, add_val):
                    print(self.a)
                    return self.a + add_val + out_closure
            return BuildCls
        return _gen_nested_class_obj
else:
    py3_code = textwrap.dedent("""
    def _gen_class_builder_func():
        out_closure = 10
        def _gen_nested_class_obj():
            class BuildCls(BuildBase, metaclass=BuildMeta):
                a = out_closure
                def b(self, add_val):
                    print(self.a)
                    return self.a + add_val + out_closure
            return BuildCls
        return _gen_nested_class_obj
    """)
    # exec into a copy of locals() so module scope is not polluted, then pull
    # the compiled function out explicitly.
    my_locs = locals().copy()
    six.exec_(py3_code, globals(), my_locs)
    _gen_class_builder_func = my_locs.get('_gen_class_builder_func')
# _gen_format_string_func: fixture exercising string formatting inside a
# closure.  f-strings only parse on 3.6+, so that variant is built via exec.
if sys.version_info[:2] < (3, 6):
    def _gen_format_string_func():
        out_closure = 4.0
        def _format_fun(arg):
            return 'Formatted stuff {0}: {1:>5}'.format(arg, out_closure)
        return _format_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_format_string_func():
        out_closure = 4.0
        def _format_fun(arg):
            return f'Formatted stuff {arg}: {out_closure:>5}'
        return _format_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_format_string_func = my_locs.get('_gen_format_string_func')
# _gen_build_unpack_func: fixture exercising PEP 448 generalized unpacking
# (the BUILD_*_UNPACK opcode family).  Starred literals only parse on newer
# interpreters, so that variant is built via exec; the fallback uses the
# semantically equivalent concatenation/merge forms.
if sys.version_info[:2] < (3, 6):
    def _gen_build_unpack_func():
        out_closure = (1, 2, 3)
        def merge_kws(a, b, *args, **kwargs):
            kwargs.update(dict(a=a, b=b))
            kwargs.update((str(idx), v) for idx, v in enumerate(args))
            return kwargs
        def _gen_fun(arg):
            t = out_closure + (4, ) + (5, 6, 7) + (arg, )
            l = list(out_closure) + [4, ] + [5, 6, 7]
            s = set(out_closure) | set([4]) | set([5, 6, 7])
            m = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
            wk = merge_kws(3, 4, 5, *(out_closure + (1, 2, 3)), **dict(m=1, n=2, p=3, q=4, r=5))
            return t, l, s, m, wk
        return _gen_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_build_unpack_func():
        out_closure = (1, 2, 3)
        def merge_kws(a, b, *args, **kwargs):
            kwargs.update(dict(a=a, b=b))
            kwargs.update((str(idx), v) for idx, v in enumerate(args))
            return kwargs
        def _gen_fun(arg):
            t = (*out_closure, *(4, ), *(5, 6, 7), *(arg, ))
            l = [*out_closure, *(4, ), *[5, 6, 7]]
            s = {*out_closure, *[4], *[5, 6, 7]}
            m = {**dict(a=1, b=2), **dict(c=3), **dict(d=4, e=5)}
            wk = merge_kws(3, 4, 5, *out_closure, *[1, 2, 3], **dict(m=1, n=2), **dict(p=3, q=4, r=5))
            return t, l, s, m, wk
        return _gen_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_build_unpack_func = my_locs.get('_gen_build_unpack_func')
# _gen_matmul_func: fixture exercising the @ matrix-multiplication operator
# (PEP 465); older interpreters fall back to the equivalent np.dot call.
if sys.version_info[:2] < (3, 6):
    def _gen_matmul_func():
        out_closure = [[4, 9, 2], [3, 5, 7], [8, 1, 6]]
        def _gen_fun(arg):
            import numpy as np
            a = np.array(out_closure)
            b = np.array([9, 5, arg])
            c = np.dot(a, b)
            return repr(c)
        return _gen_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_matmul_func():
        out_closure = [[4, 9, 2], [3, 5, 7], [8, 1, 6]]
        def _gen_fun(arg):
            import numpy as np
            a = np.array(out_closure)
            b = np.array([9, 5, arg])
            c = a @ b
            return repr(c)
        return _gen_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_matmul_func = my_locs.get('_gen_matmul_func')
def _gen_try_except_func():
out_closure = dict(k=12.0)
def _gen_fun(arg):
ex = None
agg = arg
def _cl():
print(ex)
try:
agg *= out_closure['not_exist']
except KeyError as ex:
agg += 1
try:
agg -= out_closure['k']
except KeyError as ex:
_cl()
agg /= 10
return agg
return _gen_fun
def _gen_nested_fun():
out_closure = 10
def _gen_nested_obj():
# class NestedClass(object):
def nested_method(add_val):
return out_closure + add_val
return nested_method
return lambda v: _gen_nested_obj()(*(v, ))
class Test(TestBase):
    """Round-trip tests for odps cloudpickle.

    The ``testXtoY*`` cases pickle a fixture object under a *different*
    Python interpreter (path taken from the ``test`` config section) and
    unpickle/execute it here, verifying cross-version compatibility.
    When the executable is not configured the test returns early (an
    implicit skip).
    """

    @staticmethod
    def _invoke_other_python_pickle(executable, method_ref):
        # Write a throw-away script rendered from CROSS_VAR_PICKLE_CODE, run
        # it under *executable*, and return the base64 payload the child
        # process wrote to disk.
        paths = [path for path in sys.path if 'odps' in path.lower()]
        if callable(method_ref):
            method_ref = method_ref.__name__
        ts_name = os.path.join(tempfile.gettempdir(), 'pyodps_pk_cross_test_{0}.py'.format(str(uuid.uuid4())))
        tp_name = os.path.join(tempfile.gettempdir(), 'pyodps_pk_cross_pickled_{0}'.format(str(uuid.uuid4())))
        script_text = CROSS_VAR_PICKLE_CODE.format(import_paths=json.dumps(paths), method_ref=method_ref,
                                                   pickled_file=tp_name)
        with open(ts_name, 'w') as out_file:
            out_file.write(script_text)
            out_file.close()
        proc = subprocess.Popen([executable, ts_name])
        proc.wait()
        # The child unlinks its own script; the pickled file is the only
        # evidence of success.
        if not os.path.exists(tp_name):
            raise SystemError('Pickle error occured!')
        else:
            with open(tp_name, 'r') as f:
                pickled = f.read().strip()
                f.close()
            os.unlink(tp_name)
            if not pickled:
                raise SystemError('Pickle error occured!')
            return pickled

    def testRangeObject(self):
        # Same-interpreter round trip of a range object.
        obj_serial = dumps(range(10))
        deserial = loads(obj_serial)
        self.assertListEqual(list(range(10)), list(deserial))

    def testNestedFunc(self):
        # Same-interpreter round trip of a nested closure function.
        func = _gen_nested_fun()
        obj_serial = base64.b64encode(dumps(func))
        deserial = loads(base64.b64decode(obj_serial))
        self.assertEqual(deserial(20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    @numpy_case
    def testFromImport(self):
        # Pickle under py3, unpickle/run under py2.7.
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_from_import_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_from_import_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2FormatString(self):
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_format_string_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_format_string_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2BuildUnpack(self):
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_build_unpack_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_build_unpack_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    @numpy_case
    def test3to2MatMul(self):
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_matmul_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_matmul_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2TryExcept(self):
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_try_except_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_try_except_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2NestedFunc(self):
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_nested_fun()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_fun))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    def testNestedClassObj(self):
        # Same-interpreter round trip of nested classes with a generator
        # __call__; compare the yielded sums.
        func = _gen_nested_yield_obj()
        obj_serial = base64.b64encode(dumps(func))
        deserial = loads(base64.b64decode(obj_serial))
        self.assertEqual(sum(deserial()(20)), sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test3to27NestedYieldObj(self):
        # The wrapper instantiates the unpickled class and sums the generator.
        try:
            executable = self.config.get('test', 'py3_executable')
            if not executable:
                return
        except:
            return
        func = _gen_nested_yield_obj()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_yield_obj))
        self.assertEqual(run_pickled(py3_serial, 20, wrapper=lambda fun, a, kw: sum(fun()(*a, **kw))),
                         sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test26to27NestedYieldObj(self):
        try:
            executable = self.config.get('test', 'py26_executable')
            if not executable:
                return
        except:
            return
        func = _gen_nested_yield_obj()
        py26_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_yield_obj))
        self.assertEqual(run_pickled(py26_serial, 20, wrapper=lambda fun, a, kw: sum(fun()(*a, **kw))),
                         sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test3to27NestedClassObj(self):
        # Metaclass-built class pickled under py3, instantiated/called here.
        try:
            executable = self.config.get('test', 'py3_executable')
            if not executable:
                return
        except:
            return
        cls = _gen_class_builder_func()()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_class_builder_func))
        self.assertEqual(run_pickled(py3_serial, 5, wrapper=lambda cls, a, kw: cls()().b(*a, **kw)),
                         cls().b(5))
|
led.py | import RPi.GPIO as GPIO
import time
import atexit
import threading
# Pin layout (BOARD numbering): 11 and 12 drive the two flashing LEDs,
# 15 is an output held high for the whole run, 16 is the push button input.
# NOTE(review): pin 16 uses PUD_UP, so the idle level is high — confirm the
# button wiring matches the active-high edge detection in the main loop.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(12,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
GPIO.setup(16,GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.output(15,1)
def flashLights(arg1, stop_event):
    """Alternate the LEDs on pins 11 and 12 every 0.3 s until *stop_event*
    is set (run from a worker thread)."""
    tick = 0
    while not stop_event.is_set():
        phase = tick % 2
        # One LED on, the other off; the roles swap every tick.
        GPIO.output(11 + phase, 1)
        GPIO.output(12 - phase, 0)
        tick += 1
        time.sleep(0.3)
def stopFlashing():
    """Turn both flasher LEDs off."""
    for pin in (11, 12):
        GPIO.output(pin, 0)
def progExit():
    """atexit hook: release all GPIO pins claimed by this script."""
    GPIO.cleanup()
atexit.register(progExit)

# Button-polling loop: each rising edge on pin 16 toggles the flasher thread
# on or off (polled every 50 ms; edges detected by comparing with the
# previous sample).
# Fix: the sampled pin level was stored in a variable named ``input``, which
# shadowed the builtin — renamed to ``btn_state``.
btn_prev_state = 0
lightsonoff = 0
stop_event = None
while 1:
    btn_state = GPIO.input(16)
    if (btn_state and btn_state != btn_prev_state):
        btn_prev_state = btn_state
        if (not lightsonoff):
            # Start a fresh flasher thread with its own stop flag.
            stop_event = threading.Event()
            thread = threading.Thread(target = flashLights, args=(1,stop_event))
            thread.start()
            lightsonoff = 1
        else:
            # Ask the flasher thread to exit, then force both LEDs off.
            stop_event.set()
            lightsonoff = 0
            stopFlashing()
    else:
        btn_prev_state = btn_state
    time.sleep(0.05)
|
superspider_mongo.py | # -*- coding: utf-8 -*-
import time
import json
import datetime
import requests
from urllib.parse import urlencode
import re
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError
import threading
import queue
import pymongo
from config import *
# Shared state: work queue for the worker threads and the Mongo handles.
# connect=False defers the initial MongoDB connection until first use, after
# the worker threads have been created.
lock = threading.RLock()
q = queue.Queue()
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]
table = db[MONGO_TABLE]
# Singapore Pools TOTO results: draw-list (index) page and per-draw detail page.
index_url = 'http://www.singaporepools.com.sg/DataFileArchive/Lottery/Output/toto_result_draw_list_en.html'
detail_url = 'http://www.singaporepools.com.sg/en/product/sr/Pages/toto_results.aspx'
def handle_detail_page(q):
    """Worker loop: pull ``(query_string, draw_date)`` items off *q*, fetch
    and parse the detail page, and re-queue items that failed.

    Fix: ``q.task_done()`` was previously only called when the fetch returned
    nothing — successfully processed items (and re-queued failures) were
    never marked done, so ``Queue.join()`` could never be relied on.  Every
    ``get`` is now paired with exactly one ``task_done``; a failed item is
    re-``put`` (incrementing the unfinished count) before its own
    ``task_done``.
    """
    while not q.empty():
        item = q.get()
        try:
            detail = get_page_detail(item[0])
            if not detail or not parse_page_detail(detail, item[1]):
                # Fetch or parse failed: put the item back for a retry.
                q.put(item)
        finally:
            q.task_done()
def get_page_index():
    """Download the draw-list HTML, appending a timestamp query parameter as
    a cache buster.  Returns the body on HTTP 200, otherwise None
    (implicitly)."""
    cache_buster = datetime.datetime.now().strftime("%Yy%mm%dd%Hh%Mm")
    full_url = '{}?{}'.format(index_url, urlencode({'v': cache_buster}))
    response = requests.get(full_url)
    if response.status_code == 200:
        return response.text
def parse_page_index(html):
    """Yield ``(query_string, draw_date_label)`` tuples scraped from the
    draw-list page's ``<option>`` elements."""
    option_re = re.compile(
        "queryString='(.*?)' value=.*?'True'>(.*?)</option>", re.S)
    matches = option_re.findall(html)
    print(len(matches))
    for match in matches:
        yield match
def get_page_detail(param):
    """Fetch one draw's detail page; return its HTML, or None on connection
    failure or non-200 status."""
    target = detail_url + '?' + param
    try:
        resp = requests.get(target)
    except ConnectionError:
        print('connection')
        return None
    if resp.status_code == 200:
        return resp.text
def get_text(soup, tag, class_name):
    """Return the text of the first *tag* element carrying CSS class
    *class_name*, or None (after logging) when no such element is found."""
    node = soup.find(tag, class_=class_name)
    if not node:
        print('fail parse')
        return None
    return node.text
def save_lucks(result):
    """Insert one draw-result document into Mongo; return True on success.

    Fixes: uses the module-level ``table`` handle instead of re-deriving
    ``db[MONGO_TABLE]``, and calls ``insert_one`` — ``Collection.insert`` is
    deprecated and removed in modern PyMongo.
    """
    if table.insert_one(result):
        print('Successfully Saved to Mongo', result)
        return True
    return False
def parse_page_detail(html, date):
    """Extract the winning numbers, additional number and draw number from a
    detail page and persist them via save_lucks.

    Returns False on any parse failure so the caller can retry the item;
    otherwise returns save_lucks' result.
    """
    soup = BeautifulSoup(html, 'html.parser')

    winning = []
    for slot in range(PICK_NUMBER):
        cell = get_text(soup, 'td', 'win' + str(slot + 1))
        if not cell:
            return False
        winning.append(int(cell))

    extra = get_text(soup, 'td', 'additional')
    if not extra:
        return False

    draw_label = get_text(soup, 'th', 'drawNumber')
    if not draw_label:
        return False

    record = {
        'number': int(draw_label[-4:]),
        'date': date,
        'lucks': winning,
        'additional': int(extra),
    }
    print(record)
    return save_lucks(record)
def main():
    """Seed the queue with draws not yet stored in Mongo, then fan out worker
    threads to fetch and persist the details."""
    workers = []
    index_html = get_page_index()
    for item in parse_page_index(index_html):
        if table.find_one({'date': item[1]}):
            print('{} exists.'.format(item[1]))
            continue
        q.put(item)
        # Pause between enqueues — empirically required ("here sleep is must").
        time.sleep(0.1)
    # The thread count must be large enough to drain the queue promptly.
    for _ in range(NUMBER_THREAD):
        worker = threading.Thread(target=handle_detail_page, args=(q, ))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # Time the full scraping run end-to-end.
    start = datetime.datetime.now()
    main()
    end = datetime.datetime.now()
    print(end-start)
|
reaper.py | # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016
# - Thomas Beermann <thomas.beermann@cern.ch>, 2016-2019
# - Wen Guan <wguan.icedew@gmail.com>, 2016
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
#
# PY3K COMPATIBLE
'''
Reaper is a daemon to manage file deletion.
'''
from __future__ import print_function, division
import datetime
import hashlib
import logging
import math
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.db.sqla.constants import ReplicaState
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, ServiceUnavailable, RSEAccessDenied,
ReplicaUnAvailable, ResourceTemporaryUnavailable,
DatabaseException, UnsupportedOperation,
ReplicaNotFound, RSENotFound)
from rucio.common.utils import chunks
from rucio.core import monitor
from rucio.core import rse as rse_core
from rucio.core.credential import get_signed_url
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.replica import (list_unlocked_replicas, update_replicas_states,
delete_replicas)
from rucio.core.rse import get_rse_attribute, sort_rses, get_rse_name
from rucio.core.rse_expression_parser import parse_expression
from rucio.rse import rsemanager as rsemgr
# Silence the noisy HTTP client logger and configure the daemon's root logger
# from the rucio 'common' config section (defaulting to DEBUG).
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging,
                                  config_get('common', 'loglevel',
                                             raise_exception=False,
                                             default='DEBUG').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')

# Set by the stop() signal handler; every worker loop polls this to exit.
GRACEFUL_STOP = threading.Event()
def __check_rse_usage(rse_id):
    """
    Internal method to check RSE usage and limits.

    :param rse_id: the rse id.

    :returns : max_being_deleted_files, needed_free_space, used, free.
              Any element may be None when the corresponding limit or usage
              information is not available.
    """
    max_being_deleted_files, needed_free_space, used, free = None, None, None, None
    rse = get_rse_name(rse_id=rse_id)

    # Get RSE limits.  The previous guard also tested key membership ANDed
    # with 'not limits', which on an empty dict is always true — so the whole
    # condition reduced to 'not limits'; written plainly now.
    limits = rse_core.get_rse_limits(rse_id=rse_id)
    if not limits:
        return max_being_deleted_files, needed_free_space, used, free
    min_free_space = limits.get('MinFreeSpace')
    max_being_deleted_files = limits.get('MaxBeingDeletedFiles')

    # Check from which sources to get used and total spaces
    # Default is storage
    source_for_total_space, source_for_used_space = 'storage', 'storage'
    values = get_rse_attribute(rse_id=rse_id, key='source_for_total_space')
    if values:
        source_for_total_space = values[0]
    values = get_rse_attribute(rse_id=rse_id, key='source_for_used_space')
    if values:
        source_for_used_space = values[0]

    logging.debug('RSE: %(rse)s, source_for_total_space: %(source_for_total_space)s, '
                  'source_for_used_space: %(source_for_used_space)s' % locals())

    # Get total and used space from the first usage record of each source.
    usage = rse_core.get_rse_usage(rse_id=rse_id, source=source_for_total_space)
    if not usage:
        return max_being_deleted_files, needed_free_space, used, free
    for var in usage:
        total, used = var['total'], var['used']
        break

    if source_for_total_space != source_for_used_space:
        usage = rse_core.get_rse_usage(rse_id=rse_id, source=source_for_used_space)
        if not usage:
            return max_being_deleted_files, needed_free_space, None, free
        for var in usage:
            used = var['used']
            break

    free = total - used
    if min_free_space:
        needed_free_space = min_free_space - free

    return max_being_deleted_files, needed_free_space, used, free
def reaper(rses, worker_number=1, child_number=1, total_children=1, chunk_size=100,
           once=False, greedy=False, scheme=None, delay_seconds=0):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param child_number: The child number.
    :param total_children: The total number of children created per worker.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param scheme: Force the reaper to use a particular protocol, e.g., mock.
    :param delay_seconds: Only consider replicas that entered BEING_DELETED at
                          least this many seconds ago.
    """
    logging.info('Starting Reaper: Worker %(worker_number)s, '
                 'child %(child_number)s will work on RSEs: ' % locals() + ', '.join([rse['rse'] for rse in rses]))

    pid = os.getpid()
    thread = threading.current_thread()
    hostname = socket.gethostname()
    executable = ' '.join(sys.argv)
    # Generate a hash just for the subset of RSEs
    rse_names = [rse['rse'] for rse in rses]
    # PY3 fix: hashlib requires bytes, not str — encode before hashing.
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rse_names)).encode()).hexdigest()
    sanity_check(executable=None, hostname=hostname)

    # Per-RSE back-off: RSE id -> datetime before which the RSE is skipped.
    nothing_to_do = {}

    while not GRACEFUL_STOP.is_set():
        try:
            # heartbeat
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
            checkpoint_time = datetime.datetime.now()
            # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))

            for rse in sort_rses(rses):
                try:
                    # Refresh the heartbeat at most once per minute.
                    if checkpoint_time + datetime.timedelta(minutes=1) < datetime.datetime.now():
                        heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
                        # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))
                        checkpoint_time = datetime.datetime.now()

                    # Skip RSEs that are still in their back-off window.
                    if rse['id'] in nothing_to_do and nothing_to_do[rse['id']] > datetime.datetime.now():
                        continue
                    logging.info('Reaper %s-%s: Running on RSE %s %s', worker_number, child_number,
                                 rse['rse'], nothing_to_do.get(rse['id']))

                    rse_info = rsemgr.get_rse_info(rse_id=rse['id'])
                    rse_protocol = rse_core.get_rse_protocols(rse_id=rse['id'])
                    if not rse_protocol['availability_delete']:
                        logging.info('Reaper %s-%s: RSE %s is not available for deletion', worker_number, child_number, rse_info['rse'])
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        continue

                    # Temporary hack to force gfal for deletion
                    for protocol in rse_info['protocols']:
                        if protocol['impl'] == 'rucio.rse.protocols.srm.Default' or protocol['impl'] == 'rucio.rse.protocols.gsiftp.Default':
                            protocol['impl'] = 'rucio.rse.protocols.gfal.Default'

                    needed_free_space, max_being_deleted_files = None, 100
                    needed_free_space_per_child = None
                    if not greedy:
                        max_being_deleted_files, needed_free_space, used, free = __check_rse_usage(rse_id=rse['id'])
                        logging.info('Reaper %(worker_number)s-%(child_number)s: Space usage for RSE %(rse)s - max_being_deleted_files: %(max_being_deleted_files)s, needed_free_space: %(needed_free_space)s, used: %(used)s, free: %(free)s' % locals())
                        # PY3 fix: needed_free_space may be None (no MinFreeSpace
                        # limit) and 'None <= 0' raises TypeError on Python 3.
                        # Under Python 2 'None <= 0' evaluated True, so None is
                        # treated as "nothing needed" to preserve behaviour.
                        if needed_free_space is None or needed_free_space <= 0:
                            needed_free_space, needed_free_space_per_child = 0, 0
                            logging.info('Reaper %s-%s: free space is above minimum limit for %s', worker_number, child_number, rse['rse'])
                        else:
                            if total_children and total_children > 0:
                                needed_free_space_per_child = needed_free_space / float(total_children)

                    start = time.time()
                    with monitor.record_timer_block('reaper.list_unlocked_replicas'):
                        replicas = list_unlocked_replicas(rse_id=rse['id'],
                                                          bytes=needed_free_space_per_child,
                                                          limit=max_being_deleted_files,
                                                          worker_number=child_number,
                                                          total_workers=total_children,
                                                          delay_seconds=delay_seconds)
                    logging.debug('Reaper %s-%s: list_unlocked_replicas on %s for %s bytes in %s seconds: %s replicas', worker_number, child_number, rse['rse'], needed_free_space_per_child, time.time() - start, len(replicas))

                    if not replicas:
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        logging.info('Reaper %s-%s: No replicas to delete %s. The next check will occur at %s',
                                     worker_number, child_number, rse['rse'],
                                     nothing_to_do[rse['id']])
                        continue

                    prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
                    for files in chunks(replicas, chunk_size):
                        logging.debug('Reaper %s-%s: Running on : %s', worker_number, child_number, str(files))
                        try:
                            # PY3 fix: dict_items does not support '+', materialise first.
                            update_replicas_states(replicas=[dict(list(replica.items()) + [('state', ReplicaState.BEING_DELETED), ('rse_id', rse['id'])]) for replica in files], nowait=True)

                            # Resolve each replica's deletion PFN up front.
                            for replica in files:
                                try:
                                    # PY3 fix: dict_values is not indexable, materialise first.
                                    replica['pfn'] = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
                                                                               lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
                                                                               operation='delete', scheme=scheme).values())[0])
                                except (ReplicaUnAvailable, ReplicaNotFound) as error:
                                    err_msg = 'Failed to get pfn UNAVAILABLE replica %s:%s on %s with error %s' % (replica['scope'], replica['name'], rse['rse'], str(error))
                                    logging.warning('Reaper %s-%s: %s', worker_number, child_number, err_msg)
                                    replica['pfn'] = None

                            monitor.record_counter(counters='reaper.deletion.being_deleted', delta=len(files))

                            try:
                                deleted_files = []
                                prot.connect()
                                for replica in files:
                                    try:
                                        logging.info('Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        start = time.time()
                                        if rse['staging_area'] or rse['rse'].endswith("STAGING"):
                                            logging.warning('Reaper %s-%s: Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion',
                                                            worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        else:
                                            if replica['pfn']:
                                                pfn = replica['pfn']
                                                # sign the URL if necessary
                                                if prot.attributes['scheme'] == 'https' and rse_info['sign_url'] is not None:
                                                    pfn = get_signed_url(rse_info['sign_url'], 'delete', pfn)
                                                prot.delete(pfn)
                                            else:
                                                logging.warning('Reaper %s-%s: Deletion UNAVAILABLE of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        monitor.record_timer('daemons.reaper.delete.%s.%s' % (prot.attributes['scheme'], rse['rse']), (time.time() - start) * 1000)
                                        duration = time.time() - start

                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})

                                        add_message('deletion-done', {'scope': replica['scope'].external,
                                                                      'name': replica['name'],
                                                                      'rse': rse_info['rse'],
                                                                      'rse_id': rse_info['id'],
                                                                      'file-size': replica['bytes'],
                                                                      'bytes': replica['bytes'],
                                                                      'url': replica['pfn'],
                                                                      'duration': duration,
                                                                      'protocol': prot.attributes['scheme']})
                                        logging.info('Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], duration)
                                    except SourceNotFound:
                                        # Already gone from storage: still drop it from the catalog.
                                        err_msg = 'Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        logging.warning(err_msg)
                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                                        if replica['state'] == ReplicaState.AVAILABLE:
                                            add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                            'name': replica['name'],
                                                                            'rse': rse_info['rse'],
                                                                            'rse_id': rse_info['id'],
                                                                            'file-size': replica['bytes'],
                                                                            'bytes': replica['bytes'],
                                                                            'url': replica['pfn'],
                                                                            'reason': str(err_msg),
                                                                            'protocol': prot.attributes['scheme']})
                                    except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                        logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                        add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                        'name': replica['name'],
                                                                        'rse': rse_info['rse'],
                                                                        'rse_id': rse_info['id'],
                                                                        'file-size': replica['bytes'],
                                                                        'bytes': replica['bytes'],
                                                                        'url': replica['pfn'],
                                                                        'reason': str(error),
                                                                        'protocol': prot.attributes['scheme']})
                                    except Exception as error:
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                                        add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                        'name': replica['name'],
                                                                        'rse': rse_info['rse'],
                                                                        'rse_id': rse_info['id'],
                                                                        'file-size': replica['bytes'],
                                                                        'bytes': replica['bytes'],
                                                                        'url': replica['pfn'],
                                                                        'reason': str(error),
                                                                        'protocol': prot.attributes['scheme']})
                                    except:
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                            except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                # Whole-protocol failure: report every replica of the chunk and stop.
                                for replica in files:
                                    logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                    add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                    'name': replica['name'],
                                                                    'rse': rse_info['rse'],
                                                                    'rse_id': rse_info['id'],
                                                                    'file-size': replica['bytes'],
                                                                    'bytes': replica['bytes'],
                                                                    'url': replica['pfn'],
                                                                    'reason': str(error),
                                                                    'protocol': prot.attributes['scheme']})
                                break
                            finally:
                                prot.close()

                            start = time.time()
                            with monitor.record_timer_block('reaper.delete_replicas'):
                                delete_replicas(rse_id=rse['id'], files=deleted_files)
                            logging.debug('Reaper %s-%s: delete_replicas successes %s %s %s', worker_number, child_number, rse['rse'], len(deleted_files), time.time() - start)
                            monitor.record_counter(counters='reaper.deletion.done', delta=len(deleted_files))
                        except DatabaseException as error:
                            logging.warning('Reaper %s-%s: DatabaseException %s', worker_number, child_number, str(error))
                        except UnsupportedOperation as error:
                            logging.warning('Reaper %s-%s: UnsupportedOperation %s', worker_number, child_number, str(error))
                        except:
                            logging.critical(traceback.format_exc())

                except RSENotFound as error:
                    logging.warning('Reaper %s-%s: RSE not found %s', worker_number, child_number, str(error))

                except:
                    logging.critical(traceback.format_exc())

            if once:
                break

            time.sleep(1)

        except DatabaseException as error:
            logging.warning('Reaper: %s', str(error))
        except:
            logging.critical(traceback.format_exc())

    die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
    logging.info('Graceful stop requested')
    logging.info('Graceful stop done')
    return
def stop(signum=None, frame=None):
    """
    Graceful exit.
    """
    # Standard signal-handler signature (signum, frame); simply flips the
    # shared event that all worker loops poll.
    GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, threads_per_worker=None, once=False, greedy=False, rses=None, scheme=None, exclude_rses=None, include_rses=None, delay_seconds=0):
    """
    Starts up the reaper threads.

    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param threads_per_worker: Total number of threads created by each worker.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param delay_seconds: Only consider replicas in BEING_DELETED older than this.
    """
    logging.info('main: starting processes')

    all_rses = rse_core.list_rses()
    # 'rses' defaults to None instead of a mutable [] (the old default could
    # be shared across calls); falsy values mean "all RSEs" either way.
    if rses:
        invalid = set(rses) - set([rse['rse'] for rse in all_rses])
        if invalid:
            msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
                                                   ', '.join([repr(rse) for rse in invalid]))
            raise RSENotFound(msg)
        rses = [rse for rse in all_rses if rse['rse'] in rses]
    else:
        rses = all_rses

    if exclude_rses:
        excluded_rses = parse_expression(exclude_rses)
        rses = [rse for rse in rses if rse not in excluded_rses]

    if include_rses:
        included_rses = parse_expression(include_rses)
        rses = [rse for rse in rses if rse in included_rses]

    if not rses:
        logging.error('Reaper: No RSEs found. Exiting.')
        return

    logging.info('Reaper: This instance will work on RSEs: ' + ', '.join([rse['rse'] for rse in rses]))
    threads = []
    nb_rses_per_worker = int(math.ceil(len(rses) / float(total_workers))) or 1
    # Shuffle so successive runs do not always hit the same RSEs first.
    rses = random.sample(rses, len(rses))
    for worker in range(total_workers):
        for child in range(threads_per_worker or 1):
            rses_list = rses[worker * nb_rses_per_worker: worker * nb_rses_per_worker + nb_rses_per_worker]
            if not rses_list:
                logging.warning('Reaper: Empty RSEs list for worker %(worker)s' % locals())
                continue
            kwargs = {'worker_number': worker,
                      'child_number': child + 1,
                      'total_children': threads_per_worker or 1,
                      'once': once,
                      'chunk_size': chunk_size,
                      'greedy': greedy,
                      'rses': rses_list,
                      'delay_seconds': delay_seconds,
                      'scheme': scheme}
            threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, child: %s' % (worker, child + 1)))
    if not threads:
        # Every worker slice was empty; bail out instead of raising an
        # IndexError on threads[0] below.
        logging.warning('Reaper: no worker threads were created. Exiting.')
        return
    [t.start() for t in threads]
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
|
__init__.py | import io
import os
import pathlib
import socket
import threading
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from multiprocessing import Process
from queue import Queue
from typing import Optional, Union, Dict, Tuple, IO
from platypush.config import Config
from platypush.message.event.camera import CameraRecordingStartedEvent, CameraPictureTakenEvent, \
CameraRecordingStoppedEvent, CameraVideoRenderedEvent
from platypush.plugins import Plugin, action
from platypush.plugins.camera.model.camera import CameraInfo, Camera
from platypush.plugins.camera.model.exceptions import CameraException, CaptureAlreadyRunningException
from platypush.plugins.camera.model.writer import VideoWriter, StreamWriter
from platypush.plugins.camera.model.writer.ffmpeg import FFmpegFileWriter
from platypush.plugins.camera.model.writer.preview import PreviewWriter, PreviewWriterFactory
from platypush.utils import get_plugin_name_by_class
# Public names re-exported by the camera plugin package.
__all__ = ['Camera', 'CameraInfo', 'CameraException', 'CameraPlugin', 'CaptureAlreadyRunningException',
           'StreamWriter']
class CameraPlugin(Plugin, ABC):
"""
Abstract plugin to control camera devices.
If the :class:`platypush.backend.http.HttpBackend` is enabled then the plugins that implement this class can
expose two endpoints:
- ``http://host:8008/camera/<plugin>/photo<.extension>`` to capture a photo from the camera, where
``.extension`` can be ``.jpg``, ``.png`` or ``.bmp``.
- ``http://host:8008/camera/<plugin>/video<.extension>`` to get a live feed from the camera, where
``.extension`` can be ``.mjpeg``, ``.mkv``/``.webm``, ``.mp4``/``.h264`` or ``.h265``.
Both the endpoints support the same parameters of the constructor of this class (e.g. ``device``, ``warmup_frames``,
``duration`` etc.) as ``GET`` parameters.
Requires:
* **Pillow** (``pip install Pillow``) [optional] default handler for image transformations.
* **wxPython** (``pip install wxPython``) [optional] default handler for camera previews (``ffplay`` will be
used as a fallback if ``wxPython`` is not installed).
* **ffmpeg** (see installation instructions for your OS) for rendering/streaming videos.
Triggers:
* :class:`platypush.message.event.camera.CameraRecordingStartedEvent`
when a new video recording/photo burst starts
* :class:`platypush.message.event.camera.CameraRecordingStoppedEvent`
when a video recording/photo burst ends
* :class:`platypush.message.event.camera.CameraVideoRenderedEvent`
when a sequence of captured is successfully rendered into a video
* :class:`platypush.message.event.camera.CameraPictureTakenEvent`
when a snapshot is captured and stored to an image file
"""
_camera_class = Camera
_camera_info_class = CameraInfo
_video_writer_class = FFmpegFileWriter
    def __init__(self, device: Optional[Union[int, str]] = None, resolution: Tuple[int, int] = (640, 480),
                 frames_dir: Optional[str] = None, warmup_frames: int = 5, warmup_seconds: Optional[float] = 0.,
                 capture_timeout: Optional[float] = 20.0, scale_x: Optional[float] = None,
                 scale_y: Optional[float] = None, rotate: Optional[float] = None, grayscale: Optional[bool] = None,
                 color_transform: Optional[Union[int, str]] = None, fps: float = 16, horizontal_flip: bool = False,
                 vertical_flip: bool = False, input_format: Optional[str] = None, output_format: Optional[str] = None,
                 stream_format: str = 'mjpeg', listen_port: Optional[int] = 5000, bind_address: str = '0.0.0.0',
                 ffmpeg_bin: str = 'ffmpeg', input_codec: Optional[str] = None, output_codec: Optional[str] = None,
                 **kwargs):
        """
        :param device: Identifier of the default capturing device.
        :param resolution: Default resolution, as a tuple of two integers.
        :param frames_dir: Directory where the camera frames will be stored (default:
            ``~/.local/share/platypush/<plugin.name>/frames``)
        :param warmup_frames: Cameras usually take a while to adapt their
            luminosity and focus to the environment when taking a picture.
            This parameter allows you to specify the number of "warmup" frames
            to capture upon picture command before actually capturing a frame
            (default: 5 but you may want to calibrate this parameter for your
            camera)
        :param warmup_seconds: Number of seconds to wait before a picture is taken or the first frame of a
            video/sequence is captured (default: 0).
        :param capture_timeout: Maximum number of seconds to wait between the programmed termination of a capture
            session and the moment the device is released.
        :param scale_x: If set, the images will be scaled along the x axis by the specified factor
        :param scale_y: If set, the images will be scaled along the y axis by the specified factor
        :param color_transform: Color transformation to apply to the images.
        :param grayscale: Whether the output should be converted to grayscale.
        :param rotate: If set, the images will be rotated by the specified number of degrees
        :param fps: Frames per second (default: 16).
        :param horizontal_flip: If set, the images will be flipped on the horizontal axis.
        :param vertical_flip: If set, the images will be flipped on the vertical axis.
        :param listen_port: Default port to be used for streaming over TCP (default: 5000).
        :param bind_address: Default bind address for TCP streaming (default: 0.0.0.0, accept any connections).
        :param input_codec: Specify the ffmpeg video codec (``-vcodec``) used for the input.
        :param output_codec: Specify the ffmpeg video codec (``-vcodec``) to be used for encoding the output. For some
            ffmpeg output formats (e.g. ``h264`` and ``rtp``) this may default to ``libxvid``.
        :param input_format: Plugin-specific format/type for the input stream.
        :param output_format: Plugin-specific format/type for the output videos.
        :param ffmpeg_bin: Path to the ffmpeg binary (default: ``ffmpeg``).
        :param stream_format: Default format for the output when streamed to a network device. Available:

            - ``MJPEG`` (default)
            - ``H264`` (over ``ffmpeg``)
            - ``H265`` (over ``ffmpeg``)
            - ``MKV`` (over ``ffmpeg``)
            - ``MP4`` (over ``ffmpeg``)

        """
        super().__init__(**kwargs)
        # Per-plugin working directory, e.g. <workdir>/<plugin-name>/.
        self.workdir = os.path.join(Config.get('workdir'), get_plugin_name_by_class(self))
        pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)
        # Plugin-level defaults; per-call overrides are merged in _merge_info().
        # noinspection PyArgumentList
        self.camera_info = self._camera_info_class(device, color_transform=color_transform, warmup_frames=warmup_frames,
                                                   warmup_seconds=warmup_seconds, rotate=rotate, scale_x=scale_x,
                                                   scale_y=scale_y, capture_timeout=capture_timeout, fps=fps,
                                                   input_format=input_format, output_format=output_format,
                                                   stream_format=stream_format, resolution=resolution,
                                                   grayscale=grayscale, listen_port=listen_port,
                                                   horizontal_flip=horizontal_flip, vertical_flip=vertical_flip,
                                                   ffmpeg_bin=ffmpeg_bin, input_codec=input_codec,
                                                   output_codec=output_codec, bind_address=bind_address,
                                                   frames_dir=os.path.abspath(
                                                       os.path.expanduser(frames_dir or
                                                                          os.path.join(self.workdir, 'frames'))))
        # Registries of currently open capture devices and active TCP streams,
        # keyed by device name/path/ID.
        self._devices: Dict[Union[int, str], Camera] = {}
        self._streams: Dict[Union[int, str], Camera] = {}
def _merge_info(self, **info) -> CameraInfo:
merged_info = self.camera_info.clone()
merged_info.set(**info)
return merged_info
    def open_device(self, device: Optional[Union[int, str]] = None, stream: bool = False, **params) -> Camera:
        """
        Initialize and open a device.

        :param device: Device name/path/ID (default: the plugin's configured device).
        :param stream: If set, attach a network stream writer to the camera.
        :param params: Camera info overrides - see constructor parameters.
        :return: The initialized camera device.
        :raises: :class:`platypush.plugins.camera.model.exceptions.CaptureAlreadyRunningException`
        """
        info = self._merge_info(**params)
        # Resolve which device to open and which info record to start from.
        if device is None:
            device = info.device
        elif device not in self._devices:
            info.device = device
        else:
            # Device already open: start from its current settings.
            info = self._devices[device].info.clone()
        assert device is not None, 'No device specified/configured'
        if device in self._devices:
            camera = self._devices[device]
            # Refuse to reopen a device whose capture session is still running.
            if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set():
                raise CaptureAlreadyRunningException(device)
            camera.start_event.clear()
            camera.capture_thread = None
        else:
            # noinspection PyArgumentList
            camera = self._camera_class(info=info)
        # Apply the per-call overrides on top of the resolved info.
        camera.info.set(**params)
        # Backend-specific capture handle, provided by the derived plugin.
        camera.object = self.prepare_device(camera)
        if stream:
            # Pick the stream writer class that matches the configured format.
            writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
            camera.stream = writer_class(camera=camera, plugin=self)
        if camera.info.frames_dir:
            pathlib.Path(os.path.abspath(os.path.expanduser(camera.info.frames_dir))).mkdir(
                mode=0o755, exist_ok=True, parents=True)
        self._devices[device] = camera
        return camera
def close_device(self, camera: Camera, wait_capture: bool = True) -> None:
"""
Close and release a device.
"""
name = camera.info.device
self.stop_preview(camera)
self.release_device(camera)
camera.start_event.clear()
if wait_capture:
self.wait_capture(camera)
if name in self._devices:
del self._devices[name]
    def wait_capture(self, camera: Camera) -> None:
        """
        Wait until a capture session terminates.

        :param camera: Camera object. ``camera.info.capture_timeout`` is used as a capture thread termination timeout
            if set.
        """
        # The ident check prevents the capture thread from joining itself
        # (close_device may be invoked from within the capture loop).
        if camera.capture_thread and camera.capture_thread.is_alive() and \
                threading.get_ident() != camera.capture_thread.ident:
            try:
                camera.capture_thread.join(timeout=camera.info.capture_timeout)
            except Exception as e:
                self.logger.warning('Error on FFmpeg capture wait: {}'.format(str(e)))
@contextmanager
def open(self, device: Optional[Union[int, str]] = None, stream: bool = None, **info) -> Camera:
"""
Initialize and open a device using a context manager pattern.
:param device: Capture device by name, path or ID.
:param stream: If set, the frames will be streamed to ``camera.stream``.
:param info: Camera parameters override - see constructors parameters.
:return: The initialized :class:`platypush.plugins.camera.Camera` object.
"""
camera = None
try:
camera = self.open_device(device, stream=stream, **info)
yield camera
finally:
self.close_device(camera)
    @abstractmethod
    def prepare_device(self, device: Camera):
        """
        Prepare a device using the plugin-specific logic - to be implemented by the derived classes.
        Called by :meth:`.open_device`; the returned value is stored on ``camera.object``.

        :param device: An initialized :class:`platypush.plugins.camera.Camera` object.
        """
        raise NotImplementedError()
    @abstractmethod
    def release_device(self, device: Camera):
        """
        Release a device using the plugin-specific logic - to be implemented by the derived classes.
        Called by :meth:`.close_device`.

        :param device: An initialized :class:`platypush.plugins.camera.Camera` object.
        """
        raise NotImplementedError()
    @abstractmethod
    def capture_frame(self, device: Camera, *args, **kwargs):
        """
        Capture a frame from a device using the plugin-specific logic - to be implemented by the derived classes.
        Called once per iteration by :meth:`.capturing_thread`; a falsy return value terminates the session.

        :param device: An initialized :class:`platypush.plugins.camera.Camera` object.
        """
        raise NotImplementedError()
# noinspection PyShadowingBuiltins
@staticmethod
def store_frame(frame, filepath: str, format: Optional[str] = None):
"""
Capture a frame to the filesystem using the ``PIL`` library - it can be overridden by derived classes.
:param frame: Frame object (default: a byte-encoded object or a ``PIL.Image`` object).
:param filepath: Destination file.
:param format: Output format.
"""
from PIL import Image
if isinstance(frame, bytes):
frame = list(frame)
elif not isinstance(frame, Image.Image):
frame = Image.fromarray(frame)
save_args = {}
if format:
save_args['format'] = format
frame.save(filepath, **save_args)
def _store_frame(self, frame, frames_dir: Optional[str] = None, image_file: Optional[str] = None,
*args, **kwargs) -> str:
"""
:meth:`.store_frame` wrapper.
"""
if image_file:
filepath = os.path.abspath(os.path.expanduser(image_file))
else:
filepath = os.path.abspath(os.path.expanduser(
os.path.join(frames_dir or '', datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f.jpg'))))
pathlib.Path(filepath).parent.mkdir(mode=0o755, exist_ok=True, parents=True)
self.store_frame(frame, filepath, *args, **kwargs)
return filepath
def start_preview(self, camera: Camera):
if camera.preview and not camera.preview.closed:
self.logger.info('A preview window is already active on device {}'.format(camera.info.device))
return
camera.preview = PreviewWriterFactory.get(camera, self)
if isinstance(camera.preview, Process):
camera.preview.start()
def stop_preview(self, camera: Camera):
if camera.preview and not camera.preview.closed:
camera.preview.close()
if isinstance(camera.preview, Process) and camera.preview.is_alive():
camera.preview.terminate()
camera.preview.join(timeout=5.0)
if isinstance(camera.preview, Process) and camera.preview.is_alive():
camera.preview.kill()
camera.preview = None
def frame_processor(self, frame_queue: Queue, camera: Camera, image_file: Optional[str] = None):
while True:
frame = frame_queue.get()
if frame is None:
break
frame = self.transform_frame(frame, camera.info.color_transform)
if camera.info.grayscale:
frame = self.to_grayscale(frame)
frame = self.rotate_frame(frame, camera.info.rotate)
frame = self.flip_frame(frame, camera.info.horizontal_flip, camera.info.vertical_flip)
frame = self.scale_frame(frame, camera.info.scale_x, camera.info.scale_y)
for output in camera.get_outputs():
output.write(frame)
if camera.info.frames_dir or image_file:
self._store_frame(frame=frame, frames_dir=camera.info.frames_dir, image_file=image_file)
    def capturing_thread(self, camera: Camera, duration: Optional[float] = None, video_file: Optional[str] = None,
                         image_file: Optional[str] = None, n_frames: Optional[int] = None, preview: bool = False,
                         **kwargs):
        """
        Camera capturing thread.

        :param camera: An initialized :class:`platypush.plugins.camera.Camera` object.
        :param duration: Capturing session duration in seconds (default: until :meth:`.stop_capture` is called).
        :param video_file: If set, the session will be recorded to this output video file (video capture mode).
        :param image_file: If set, the output of the session will be a single image file (photo mode).
        :param n_frames: Number of frames to be captured (default: until :meth:`.stop_capture` is called).
        :param preview: Start a preview window.
        :param kwargs: Extra arguments to be passed to :meth:`.capture_frame`.
        """
        # Block until start_camera() flips the start flag.
        camera.start_event.wait()
        recording_started_time = time.time()
        captured_frames = 0
        evt_args = {
            'device': camera.info.device,
        }
        if video_file or image_file:
            evt_args['filename'] = video_file or image_file
        if camera.info.frames_dir:
            evt_args['frames_dir'] = camera.info.frames_dir
        if preview:
            self.start_preview(camera)
        # Extend the session so the warm-up period doesn't eat into the
        # requested duration.
        if duration and camera.info.warmup_seconds:
            duration = duration + camera.info.warmup_seconds
        if video_file:
            camera.file_writer = self._video_writer_class(camera=camera, plugin=self, output_file=video_file)
        # Frames are handed off to a separate processor thread so the capture
        # loop isn't stalled by transformations/encoding.
        frame_queue = Queue()
        frame_processor = threading.Thread(target=self.frame_processor,
                                           kwargs=dict(frame_queue=frame_queue, camera=camera, image_file=image_file))
        frame_processor.start()
        self.fire_event(CameraRecordingStartedEvent(**evt_args))
        try:
            while camera.start_event.is_set():
                # Stop on the duration or frame-count limits, if configured.
                if (duration and time.time() - recording_started_time >= duration) \
                        or (n_frames and captured_frames >= n_frames):
                    break
                frame_capture_start = time.time()
                try:
                    frame = self.capture_frame(camera, **kwargs)
                    if not frame:
                        self.logger.warning('Invalid frame received, terminating the capture session')
                        break
                    frame_queue.put(frame)
                except AssertionError as e:
                    # Backends signal transient capture errors via assertions.
                    self.logger.warning(str(e))
                    continue
                # Frames captured during the warm-up window don't count
                # towards n_frames.
                if not n_frames or not camera.info.warmup_seconds or \
                        (time.time() - recording_started_time >= camera.info.warmup_seconds):
                    captured_frames += 1
                # Pace the loop to the configured FPS.
                if camera.info.fps:
                    wait_time = (1. / camera.info.fps) - (time.time() - frame_capture_start)
                    if wait_time > 0:
                        time.sleep(wait_time)
        finally:
            # None is the frame processor's termination sentinel.
            frame_queue.put(None)
            self.stop_preview(camera)
            for output in camera.get_outputs():
                # noinspection PyBroadException
                try:
                    output.close()
                except:
                    pass
            self.close_device(camera, wait_capture=False)
            frame_processor.join(timeout=5.0)
            self.fire_event(CameraRecordingStoppedEvent(**evt_args))
            if image_file:
                self.fire_event(CameraPictureTakenEvent(filename=image_file))
            if video_file:
                self.fire_event(CameraVideoRenderedEvent(filename=video_file))
    def start_camera(self, camera: Camera, preview: bool = False, *args, **kwargs):
        """
        Start a camera capture session.

        :param camera: An initialized :class:`platypush.plugins.camera.Camera` object.
        :param preview: Show a preview of the camera frames.
        """
        assert not (camera.capture_thread and camera.capture_thread.is_alive()), \
            'A capture session is already in progress'
        camera.capture_thread = threading.Thread(target=self.capturing_thread, args=(camera, *args),
                                                 kwargs={'preview': preview, **kwargs})
        camera.capture_thread.start()
        # The capture thread blocks on start_event; setting it releases it.
        camera.start_event.set()
    @action
    def capture_video(self, duration: Optional[float] = None, video_file: Optional[str] = None, preview: bool = False,
                      **camera) -> Union[str, dict]:
        """
        Capture a video.

        :param duration: Record duration in seconds (default: None, record until ``stop_capture``).
        :param video_file: If set, the stream will be recorded to the specified video file (default: None).
        :param camera: Camera parameters override - see constructors parameters.
        :param preview: Show a preview of the camera frames.
        :return: If duration is specified, the method will wait until the recording is done and return the local path
            to the recorded resource. Otherwise, it will return the status of the camera device after starting it.
        """
        camera = self.open_device(**camera)
        # frames_dir/image_file are disabled: this session only writes the video.
        self.start_camera(camera, duration=duration, video_file=video_file, frames_dir=None, image_file=None,
                          preview=preview)
        if duration:
            # Synchronous mode: block until the capture thread finishes.
            self.wait_capture(camera)
            return video_file
        return self.status(camera.info.device)
@action
def stop_capture(self, device: Optional[Union[int, str]] = None):
"""
Stop any capturing session on the specified device.
:param device: Name/path/ID of the device to stop (default: all the active devices).
"""
devices = self._devices.copy()
stop_devices = list(devices.values())[:]
if device:
stop_devices = [self._devices[device]] if device in self._devices else []
for device in stop_devices:
self.close_device(device)
    @action
    def capture_image(self, image_file: str, preview: bool = False, **camera) -> str:
        """
        Capture an image.

        :param image_file: Path where the output image will be stored.
        :param camera: Camera parameters override - see constructors parameters.
        :param preview: Show a preview of the camera frames.
        :return: The local path to the saved image.
        """
        with self.open(**camera) as camera:
            # Capture as many frames as the warm-up count so the sensor can
            # adjust luminosity/focus; the stored photo is the final frame.
            warmup_frames = camera.info.warmup_frames if camera.info.warmup_frames else 1
            self.start_camera(camera, image_file=image_file, n_frames=warmup_frames, preview=preview)
            self.wait_capture(camera)
        return image_file
@action
def take_picture(self, image_file: str, preview: bool = False, **camera) -> str:
"""
Alias for :meth:`.capture_image`.
:param image_file: Path where the output image will be stored.
:param camera: Camera parameters override - see constructors parameters.
:param preview: Show a preview of the camera frames.
:return: The local path to the saved image.
"""
return self.capture_image(image_file, **camera)
    @action
    def capture_sequence(self, duration: Optional[float] = None, n_frames: Optional[int] = None, preview: bool = False,
                         **camera) -> str:
        """
        Capture a sequence of frames from a camera and store them to a directory.

        :param duration: Duration of the sequence in seconds (default: until :meth:`.stop_capture` is called).
        :param n_frames: Number of images to be captured (default: until :meth:`.stop_capture` is called).
        :param camera: Camera parameters override - see constructors parameters. ``frames_dir`` and ``fps`` in
            particular can be specifically tuned for ``capture_sequence``.
        :param preview: Show a preview of the camera frames.
        :return: The directory where the image files have been stored.
        """
        with self.open(**camera) as camera:
            self.start_camera(camera, duration=duration, n_frames=n_frames, preview=preview)
            # Block until the session ends so the directory is complete on return.
            self.wait_capture(camera)
        return camera.info.frames_dir
    @action
    def capture_preview(self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera) -> dict:
        """
        Start a camera preview session.

        :param duration: Preview duration (default: until :meth:`.stop_capture` is called).
        :param n_frames: Number of frames to display before closing (default: until :meth:`.stop_capture` is called).
        :param camera: Camera object properties.
        :return: The status of the device.
        """
        # frames_dir=None: preview-only session, no frames stored to disk.
        camera = self.open_device(frames_dir=None, **camera)
        self.start_camera(camera, duration=duration, n_frames=n_frames, preview=True)
        return self.status(camera.info.device)
@staticmethod
def _prepare_server_socket(camera: Camera) -> socket.socket:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((camera.info.bind_address or '0.0.0.0', camera.info.listen_port))
server_socket.listen(1)
server_socket.settimeout(1)
return server_socket
def _accept_client(self, server_socket: socket.socket) -> Optional[IO]:
try:
sock = server_socket.accept()[0]
self.logger.info('Accepted client connection from {}'.format(sock.getpeername()))
return sock.makefile('wb')
except socket.timeout:
return
    def streaming_thread(self, camera: Camera, stream_format: str, duration: Optional[float] = None):
        """
        Accept-loop for TCP streaming: waits for clients on the camera's
        listen port and feeds them the encoded camera stream.

        :param camera: Streaming camera object.
        :param stream_format: Output stream format (e.g. ``mjpeg``, ``mkv``).
        :param duration: Optional maximum streaming time, in seconds.
        """
        streaming_started_time = time.time()
        server_socket = self._prepare_server_socket(camera)
        sock = None
        self.logger.info('Starting streaming on port {}'.format(camera.info.listen_port))
        try:
            # stream_event is cleared by _stop_streaming(); the 1s accept
            # timeout on the server socket makes this loop poll it regularly.
            while camera.stream_event.is_set():
                if duration and time.time() - streaming_started_time >= duration:
                    break
                sock = self._accept_client(server_socket)
                if not sock:
                    continue
                if camera.info.device not in self._devices:
                    # The device was closed in the meantime: reopen it with
                    # the requested stream format.
                    info = camera.info.to_dict()
                    info['stream_format'] = stream_format
                    camera = self.open_device(stream=True, **info)
                camera.stream.sock = sock
                self.start_camera(camera, duration=duration, frames_dir=None, image_file=None)
        finally:
            self._cleanup_stream(camera, server_socket, sock)
            self.logger.info('Stopped camera stream')
def _cleanup_stream(self, camera: Camera, server_socket: socket.socket, client: IO):
if client:
try:
client.close()
except Exception as e:
self.logger.warning('Error on client socket close: {}'.format(str(e)))
try:
server_socket.close()
except Exception as e:
self.logger.warning('Error on server socket close: {}'.format(str(e)))
if camera.stream:
try:
camera.stream.close()
except Exception as e:
self.logger.warning('Error while closing the encoding stream: {}'.format(str(e)))
    @action
    def start_streaming(self, duration: Optional[float] = None, stream_format: str = 'mkv', **camera) -> dict:
        """
        Expose the video stream of a camera over a TCP connection.

        :param duration: Streaming thread duration (default: until :meth:`.stop_streaming` is called).
        :param stream_format: Format of the output stream - e.g. ``h264``, ``mjpeg``, ``mkv`` etc. (default: ``mkv``).
        :param camera: Camera object properties - see constructor parameters.
        :return: The status of the device.
        """
        camera = self.open_device(stream=True, stream_format=stream_format, **camera)
        return self._start_streaming(camera, duration, stream_format)
    def _start_streaming(self, camera: Camera, duration: Optional[float], stream_format: str):
        """
        Register the camera as a streaming device and spawn its accept-loop thread.

        :raises AssertionError: If no listen port is configured or a streaming
            session is already running for the device.
        """
        assert camera.info.listen_port, 'No listen_port specified/configured'
        assert not camera.stream_event.is_set() and camera.info.device not in self._streams, \
            'A streaming session is already running for device {}'.format(camera.info.device)
        self._streams[camera.info.device] = camera
        # Set the flag before starting the thread so its loop condition holds.
        camera.stream_event.set()
        camera.stream_thread = threading.Thread(target=self.streaming_thread, kwargs=dict(
            camera=camera, duration=duration, stream_format=stream_format))
        camera.stream_thread.start()
        return self.status(camera.info.device)
@action
def stop_streaming(self, device: Optional[Union[int, str]] = None):
"""
Stop a camera over TCP session.
:param device: Name/path/ID of the device to stop (default: all the active devices).
"""
streams = self._streams.copy()
stop_devices = list(streams.values())[:]
if device:
stop_devices = [self._streams[device]] if device in self._streams else []
for device in stop_devices:
self._stop_streaming(device)
def _stop_streaming(self, camera: Camera):
camera.stream_event.clear()
if camera.stream_thread and camera.stream_thread.is_alive():
camera.stream_thread.join(timeout=5.0)
if camera.info.device in self._streams:
del self._streams[camera.info.device]
def _status(self, device: Union[int, str]) -> dict:
camera = self._devices.get(device, self._streams.get(device))
if not camera:
return {}
return {
**camera.info.to_dict(),
'active': True if camera.capture_thread and camera.capture_thread.is_alive() else False,
'capturing': True if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set() else False,
'streaming': camera.stream_thread and camera.stream_thread.is_alive() and camera.stream_event.is_set(),
}
@action
def status(self, device: Optional[Union[int, str]] = None):
"""
Returns the status of the specified camera or all the active cameras if ``device`` is ``None``.
"""
if device:
return self._status(device)
return {
id: self._status(device)
for id, camera in self._devices.items()
}
@staticmethod
def transform_frame(frame, color_transform):
"""
Frame color space (e.g. ``RGB24``, ``YUV`` etc.) transform logic. Does nothing unless implemented by a
derived plugin.
"""
return frame.convert(color_transform)
def to_grayscale(self, frame):
"""
Convert a frame to grayscale. The default implementation assumes that frame is a ``PIL.Image`` object.
:param frame: Image frame (default: a ``PIL.Image`` object).
"""
from PIL import ImageOps
return ImageOps.grayscale(frame)
@staticmethod
def rotate_frame(frame, rotation: Optional[Union[float, int]] = None):
"""
Frame rotation logic. The default implementation assumes that frame is a ``PIL.Image`` object.
:param frame: Image frame (default: a ``PIL.Image`` object).
:param rotation: Rotation angle in degrees.
"""
if not rotation:
return frame
return frame.rotate(rotation, expand=True)
@staticmethod
def flip_frame(frame, horizontal_flip: bool = False, vertical_flip: bool = False):
"""
Frame flip logic. Does nothing unless implemented by a derived plugin.
:param frame: Image frame (default: a ``PIL.Image`` object).
:param horizontal_flip: Flip along the horizontal axis.
:param vertical_flip: Flip along the vertical axis.
"""
from PIL import Image
if horizontal_flip:
frame = frame.transpose(Image.FLIP_TOP_BOTTOM)
if vertical_flip:
frame = frame.transpose(Image.FLIP_LEFT_RIGHT)
return frame
@staticmethod
def scale_frame(frame, scale_x: Optional[float] = None, scale_y: Optional[float] = None):
"""
Frame scaling logic. The default implementation assumes that frame is a ``PIL.Image`` object.
:param frame: Image frame (default: a ``PIL.Image`` object).
:param scale_x: X-scale factor.
:param scale_y: Y-scale factor.
"""
from PIL import Image
if not (scale_x and scale_y) or (scale_x == 1 and scale_y == 1):
return frame
size = (int(frame.size[0] * scale_x), int(frame.size[1] * scale_y))
return frame.resize(size, Image.ANTIALIAS)
@staticmethod
def encode_frame(frame, encoding: str = 'jpeg') -> bytes:
"""
Encode a frame to a target type. The default implementation assumes that frame is a ``PIL.Image`` object.
:param frame: Image frame (default: a ``PIL.Image`` object).
:param encoding: Image encoding (e.g. ``jpeg``).
"""
if not encoding:
return frame
with io.BytesIO() as buf:
frame.save(buf, format=encoding)
return buf.getvalue()
@staticmethod
def _get_warmup_seconds(camera: Camera) -> float:
if camera.info.warmup_seconds:
return camera.info.warmup_seconds
if camera.info.warmup_frames and camera.info.fps:
return camera.info.warmup_frames / camera.info.fps
return 0
# vim:sw=4:ts=4:et:
|
iterators.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import math
import queue
import threading
import numpy as np
import torch
from . import data_utils
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap

    Attributes:
        count (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable):
        self.iterable = iterable
        self.count = 0
        # Iterate through our own generator so count stays in sync.
        self.itr = iter(self)

    def __len__(self):
        return len(self.iterable)

    def __iter__(self):
        for item in self.iterable:
            self.count += 1
            yield item

    def __next__(self):
        return next(self.itr)

    def has_next(self):
        """Return True while elements remain to be consumed."""
        return self.count < len(self)

    def skip(self, num_to_skip):
        """Fast-forward the iterator by consuming *num_to_skip* elements."""
        # islice(it, n, n) advances the iterator by n steps and yields nothing;
        # the default keeps next() from raising StopIteration at the end.
        exhausted_slice = itertools.islice(self.itr, num_to_skip, num_to_skip)
        next(exhausted_slice, None)
        return self
class EpochBatchIterator(object):
    """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.

    Compared to :class:`torch.utils.data.DataLoader`, this iterator:

    - can be reused across multiple epochs with the :func:`next_epoch_itr`
      method (optionally shuffled between epochs)
    - can be serialized/deserialized with the :func:`state_dict` and
      :func:`load_state_dict` methods
    - supports sharding with the *num_shards* and *shard_id* arguments

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        collate_fn (callable): merges a list of samples to form a mini-batch
        batch_sampler (~torch.utils.data.Sampler): an iterator over batches of
            indices
        seed (int, optional): seed for random number generator for
            reproducibility. Default: 1
        num_shards (int, optional): shard the data iterator into N
            shards. Default: 1
        shard_id (int, optional): which shard of the data iterator to
            return. Default: 0
        buffer_size (int, optional): number of batches to buffer. Default: 5
    """

    def __init__(
        self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,
        buffer_size=5,
    ):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        # Materialize and freeze the base batch order so every epoch
        # reshuffles the same list deterministically.
        self.frozen_batches = tuple(batch_sampler)
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id
        self.buffer_size = buffer_size

        self.epoch = 0
        self._cur_epoch_itr = None
        # Pre-built iterator, set only when resuming from a checkpoint.
        self._next_epoch_itr = None
        self._supports_prefetch = (
            hasattr(dataset, 'supports_prefetch') and dataset.supports_prefetch
        )

    def __len__(self):
        return len(self.frozen_batches)

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator. Default: ``True``
            fix_batches_to_gpus: ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching. Default:
                ``False``
        """
        if self._next_epoch_itr is not None:
            # Resume path: consume the iterator prepared by load_state_dict().
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self.epoch += 1
            self._cur_epoch_itr = self._get_iterator_for_epoch(
                self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus)
        return self._cur_epoch_itr

    def end_of_epoch(self):
        """Returns whether the most recent epoch iterator has been exhausted"""
        return not self._cur_epoch_itr.has_next()

    @property
    def iterations_in_epoch(self):
        """The number of consumed batches in the current epoch."""
        if self._cur_epoch_itr is not None:
            return self._cur_epoch_itr.count
        elif self._next_epoch_itr is not None:
            return self._next_epoch_itr.count
        return 0

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {
            'epoch': self.epoch,
            'iterations_in_epoch': self.iterations_in_epoch,
        }

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.epoch = state_dict['epoch']
        itr_pos = state_dict.get('iterations_in_epoch', 0)
        if itr_pos > 0:
            # fast-forward epoch iterator
            itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
            if itr_pos < len(itr):
                self._next_epoch_itr = itr.skip(itr_pos)

    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False):
        # Build the (optionally shuffled, sharded, buffered) batch iterator
        # for one epoch.

        def shuffle_batches(batches, seed):
            # set seed based on the seed and epoch number so that we get
            # reproducible results when resuming from checkpoints
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches

        if self._supports_prefetch:
            batches = self.frozen_batches

            if shuffle and not fix_batches_to_gpus:
                # Shuffle before sharding: shards see different batches each
                # epoch.
                batches = shuffle_batches(list(batches), self.seed + epoch)

            batches = list(ShardedIterator(
                batches, self.num_shards, self.shard_id, fill_value=[]))
            self.dataset.prefetch([i for s in batches for i in s])

            if shuffle and fix_batches_to_gpus:
                # Shard-local shuffle (seeded per shard): each GPU keeps the
                # same batches across epochs, in a different order.
                batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
        else:
            if shuffle:
                batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
            else:
                batches = self.frozen_batches
            batches = ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])

        return CountingIterator(BufferedIterator(
            torch.utils.data.DataLoader(
                self.dataset,
                collate_fn=self.collate_fn,
                batch_sampler=batches,
            ),
            buffer_size=self.buffer_size,
        ))
class BufferedIterator(object):
    """Wrapper around an iterable that prefetches items into a buffer.

    A daemon thread eagerly pulls items from *iterable* into a bounded
    queue. Exceptions raised by the producer thread are forwarded and
    re-raised in the consumer thread.

    Args:
        iterable (iterable): iterable to wrap
        buffer_size (int): number of items to prefetch and buffer
    """

    def __init__(self, iterable, buffer_size):
        self.iterable = iterable
        self.q = queue.Queue(maxsize=buffer_size)
        self.thread = threading.Thread(target=self._load_q, daemon=True)
        self.thread.start()

    def __len__(self):
        return len(self.iterable)

    def __iter__(self):
        return self

    def __next__(self):
        x = self.q.get()
        if x is None:
            # Sentinel: producer exhausted the iterable cleanly.
            self.thread.join()
            raise StopIteration
        if isinstance(x, Exception):
            # Bug fix: previously an exception in the producer thread died
            # silently and the consumer blocked forever on q.get(); now the
            # error is re-raised here.
            self.thread.join()
            raise x
        return x[0]

    def _load_q(self):
        try:
            for x in self.iterable:
                self.q.put([x])  # wrap in list so that it's never None
        except Exception as e:
            # Forward the error to the consumer instead of dying silently.
            self.q.put(e)
            return
        self.q.put(None)
class GroupedIterator(object):
    """Wrapper around an iterable that returns groups (chunks) of items.

    The final group may be shorter than *chunk_size*.

    Args:
        iterable (iterable): iterable to wrap
        chunk_size (int): size of each chunk
    """

    def __init__(self, iterable, chunk_size):
        self.chunk_size = chunk_size
        self.itr = iter(iterable)
        # Number of chunks, rounding the trailing partial chunk up.
        self._len = int(math.ceil(len(iterable) / float(chunk_size)))

    def __len__(self):
        return self._len

    def __iter__(self):
        return self

    def __next__(self):
        # Take up to chunk_size items; an empty take means exhaustion.
        group = list(itertools.islice(self.itr, self.chunk_size))
        if not group:
            raise StopIteration
        return group
class ShardedIterator(object):
    """A sharded wrapper around an iterable, padded to length.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterator over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards*. Default: ``None``
    """

    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if not (0 <= shard_id < num_shards):
            raise ValueError('shard_id must be between 0 and num_shards')

        total = len(iterable)
        full_shards, remainder = divmod(total, num_shards)
        # Every shard reports the same (rounded-up) length.
        self._sharded_len = full_shards + (1 if remainder else 0)

        # zip_longest pads short shards with fill_value up to the shared length.
        self.itr = itertools.zip_longest(
            range(self._sharded_len),
            itertools.islice(iterable, shard_id, total, num_shards),
            fillvalue=fill_value,
        )

    def __len__(self):
        return self._sharded_len

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.itr)[1]
|
systrace_controller.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
import zlib
from profile_chrome import controllers
from profile_chrome import util
from pylib import cmd_helper
# atrace flags appended to every async command issued over adb.
_SYSTRACE_OPTIONS = [
    # Compress the trace before sending it over USB.
    '-z',
    # Use a large trace buffer to increase the polling interval.
    '-b', '16384'
]

# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
class SystraceController(controllers.BaseController):
    """Captures a systrace by periodically polling atrace over adb.

    Sampling runs on a background thread; PullTrace() joins the thread
    and writes the decompressed trace to a timestamped file.
    """

    def __init__(self, device, categories, ring_buffer):
        controllers.BaseController.__init__(self)
        self._device = device
        self._categories = categories
        self._ring_buffer = ring_buffer
        self._trace_data = None
        self._thread = None
        # Signals the sampling thread to stop and take its final dump.
        self._done = threading.Event()

    def __repr__(self):
        return 'systrace'

    @staticmethod
    def GetCategories(device):
        return device.RunShellCommand('atrace --list_categories')

    def StartTracing(self, _):
        self._thread = threading.Thread(target=self._CollectData)
        self._thread.start()

    def StopTracing(self):
        self._done.set()

    def PullTrace(self):
        self._thread.join()
        self._thread = None
        if not self._trace_data:
            return None
        output_name = 'systrace-%s' % util.GetTraceTimestamp()
        with open(output_name, 'w') as out:
            out.write(self._trace_data)
        return output_name

    def _RunATraceCommand(self, command):
        # We use a separate interface to adb because the one from AndroidCommands
        # isn't re-entrant.
        # TODO(jbudorick) Look at providing a way to unhandroll this once the
        # adb rewrite has fully landed.
        serial = str(self._device)
        cmd = ['adb']
        if serial:
            cmd += ['-s', serial]
        cmd += ['shell', 'atrace', '--%s' % command]
        cmd += _SYSTRACE_OPTIONS + self._categories
        return cmd_helper.GetCmdOutput(cmd)

    def _CollectData(self):
        chunks = []
        self._RunATraceCommand('async_start')
        try:
            while not self._done.is_set():
                self._done.wait(_SYSTRACE_INTERVAL)
                # In ring-buffer mode only the final dump matters; otherwise
                # accumulate one chunk per polling interval.
                if self._done.is_set() or not self._ring_buffer:
                    chunks.append(
                        self._DecodeTraceData(self._RunATraceCommand('async_dump')))
        finally:
            chunks.append(
                self._DecodeTraceData(self._RunATraceCommand('async_stop')))
        self._trace_data = ''.join([zlib.decompress(chunk) for chunk in chunks])

    @staticmethod
    def _DecodeTraceData(trace_data):
        marker = trace_data.find('TRACE:')
        if marker == -1:
            raise RuntimeError('Systrace start marker not found')
        trace_data = trace_data[marker + 6:]

        # Collapse CRLFs that are added by adb shell.
        if trace_data.startswith('\r\n'):
            trace_data = trace_data.replace('\r\n', '\n')

        # Skip the initial newline.
        return trace_data[1:]
|
qt.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_gui.qt.util import *
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum_gui.qt.amountedit import AmountEdit
from electrum_gui.qt.main_window import StatusBarButton
from electrum.i18n import _
from electrum.plugins import hook
from trustedcoin import TrustedCoinPlugin, server
def need_server(wallet, tx):
    """Return True when signing *tx* requires the TrustedCoin server.

    That is the case when one of the transaction's inputs to sign carries
    an xpubkey (``'ff'`` prefix) matching the wallet's third master public
    key (``'x3/'``).
    """
    from electrum.account import BIP32_Account
    # Detect if the server is needed
    long_id, short_id = wallet.get_user_id()
    server_xpub = wallet.master_public_keys['x3/']
    for xpubkey in tx.inputs_to_sign():
        if xpubkey[0:2] != 'ff':
            continue
        parsed_xpub, _sequence = BIP32_Account.parse_xpubkey(xpubkey)
        if parsed_xpub == server_xpub:
            return True
    return False
class Plugin(TrustedCoinPlugin):
    """Qt GUI for the TrustedCoin two-factor authentication plugin.

    Adds a status-bar button to wallet windows, prompts for Google
    Authenticator codes when the co-signing server is needed, and exposes
    a settings/billing dialog.
    """

    @hook
    def on_new_window(self, window):
        """Install the TrustedCoin status-bar button on a new wallet window
        and fetch billing info in a background thread."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if wallet.can_sign_without_server():
            # Bug fix: user-facing typo "is was restored" -> "was restored".
            msg = ' '.join([
                _('This wallet was restored from seed, and it contains two master private keys.'),
                _('Therefore, two-factor authentication is disabled.')
            ])
            action = lambda: window.show_message(msg)
        else:
            action = partial(self.settings_dialog, window)
        button = StatusBarButton(QIcon(":icons/trustedcoin.png"),
                                 _("TrustedCoin"), action)
        window.statusBar().addPermanentWidget(button)
        t = Thread(target=self.request_billing_info, args=(wallet,))
        t.setDaemon(True)
        t.start()

    def auth_dialog(self, window):
        """Prompt for the Google Authenticator code.

        Returns the entered code as an int, or None if cancelled.
        """
        d = WindowModalDialog(window, _("Authorization"))
        vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int = True)
        msg = _('Please enter your Google Authenticator code')
        vbox.addWidget(QLabel(msg))
        grid = QGridLayout()
        grid.setSpacing(8)
        grid.addWidget(QLabel(_('Code')), 1, 0)
        grid.addWidget(pw, 1, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        return pw.get_amount()

    @hook
    def sign_tx(self, window, tx):
        """Collect an OTP when the server must co-sign *tx* and stash it on
        the wallet for the signing code to pick up."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if not wallet.can_sign_without_server():
            self.print_error("twofactor:sign_tx")
            auth_code = None
            if need_server(wallet, tx):
                auth_code = self.auth_dialog(window)
            else:
                self.print_error("twofactor: xpub3 not needed")
            window.wallet.auth_code = auth_code

    def waiting_dialog(self, window, on_finished=None):
        """Show a modal waiting dialog while billing info is fetched."""
        task = partial(self.request_billing_info, window.wallet)
        return WaitingDialog(window, 'Getting billing information...', task,
                             on_finished)

    @hook
    def abort_send(self, window):
        """Return True to abort sending when billing info is required but
        cannot be obtained from the server."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if not wallet.can_sign_without_server():
            if wallet.billing_info is None:
                # request billing info before forming the transaction
                # Bug fix: `waiting_dialog` is a method; the original called
                # it as a free function (`waiting_dialog(self, window)`),
                # which raises NameError at runtime.
                self.waiting_dialog(window).wait()
                if wallet.billing_info is None:
                    window.show_message('Could not contact server')
                    return True
        return False

    def settings_dialog(self, window):
        """Open the TrustedCoin settings dialog once billing info is loaded."""
        self.waiting_dialog(window, partial(self.show_settings_dialog, window))

    def show_settings_dialog(self, window, success):
        """Render the billing/settings dialog (called after the waiting
        dialog finishes; *success* tells whether the server was reachable)."""
        if not success:
            window.show_message(_('Server not reachable.'))
            return

        wallet = window.wallet
        d = WindowModalDialog(window, _("TrustedCoin Information"))
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()

        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)

        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)

        vbox.addLayout(hbox)
        vbox.addStretch(10)

        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transaction using this dialog.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)

        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)

        price_per_tx = wallet.price_per_tx
        v = price_per_tx.get(1)
        grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
        grid.addWidget(QLabel(window.format_amount(v) + ' ' + window.base_unit()), 0, 1)

        i = 1
        # Synthesize a 10-pack price when the server does not offer one.
        if 10 not in price_per_tx:
            price_per_tx[10] = 10 * price_per_tx.get(1)
        for k, v in sorted(price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
            grid.addWidget(QLabel("%d x "%k + window.format_amount(v/k) + ' ' + window.base_unit()), i, 1)
            b = QPushButton(_("Buy"))
            # k=k, v=v defaults bind loop values at definition time.
            b.clicked.connect(lambda b, k=k, v=v: self.on_buy(window, k, v, d))
            grid.addWidget(b, i, 2)
            i += 1

        n = wallet.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)

        # tranfer button
        #def on_transfer():
        #    server.transfer_credit(self.user_id, recipient, otp, signature_callback)
        #    pass
        #b = QPushButton(_("Transfer"))
        #b.clicked.connect(on_transfer)
        #grid.addWidget(b, 1, 2)

        #grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
        #grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def on_buy(self, window, k, v, d):
        """Prefill the send tab with a payment for *k* prepaid transactions
        at total price *v* (satoshis) and freeze its fields."""
        d.close()
        if window.pluginsdialog:
            window.pluginsdialog.close()
        wallet = window.wallet
        uri = "bitcoin:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
        wallet.is_billing = True
        window.pay_to_URI(uri)
        window.payto_e.setFrozen(True)
        window.message_e.setFrozen(True)
        window.amount_e.setFrozen(True)

    def accept_terms_of_use(self, window):
        """Show the ToS (fetched in a background thread) and collect the
        user's e-mail address; returns the address as a str."""
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Terms of Service")))

        tos_e = QTextEdit()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)

        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)

        next_button = window.next_button
        prior_button_text = next_button.text()
        next_button.setText(_('Accept'))

        def request_TOS():
            tos = server.get_terms_of_service()
            self.TOS = tos
            window.emit(SIGNAL('twofactor:TOS'))

        def on_result():
            tos_e.setText(self.TOS)

        def set_enabled():
            # Only enable "Accept" once the e-mail looks plausible.
            next_button.setEnabled(re.match(regexp,email_e.text()) is not None)

        window.connect(window, SIGNAL('twofactor:TOS'), on_result)
        t = Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()

        regexp = r"[^@]+@[^@]+\.[^@]+"
        email_e.textChanged.connect(set_enabled)
        email_e.setFocus(True)
        window.set_main_layout(vbox, next_enabled=False)
        next_button.setText(prior_button_text)

        return str(email_e.text())

    def setup_google_auth(self, window, _id, otp_secret):
        """Walk the user through Google Authenticator enrollment; loops until
        the server accepts an OTP. Returns True on success, False if the
        wizard is cancelled."""
        vbox = QVBoxLayout()
        if otp_secret is not None:
            uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
            l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
            l.setWordWrap(True)
            vbox.addWidget(l)
            qrw = QRCodeWidget(uri)
            vbox.addWidget(qrw, 1)
            msg = _('Then, enter your Google Authenticator code:')
        else:
            label = QLabel("This wallet is already registered, but it was never authenticated. To finalize your registration, please enter your Google Authenticator Code. If you do not have this code, delete the wallet file and start a new registration")
            label.setWordWrap(1)
            vbox.addWidget(label)
            msg = _('Google Authenticator code:')

        hbox = QHBoxLayout()
        hbox.addWidget(WWLabel(msg))
        pw = AmountEdit(None, is_int = True)
        pw.setFocus(True)
        pw.setMaximumWidth(50)
        hbox.addWidget(pw)
        vbox.addLayout(hbox)

        def set_enabled():
            window.next_button.setEnabled(len(pw.text()) == 6)
        pw.textChanged.connect(set_enabled)

        while True:
            if not window.set_main_layout(vbox, next_enabled=False,
                                          raise_on_cancel=False):
                return False
            otp = pw.get_amount()
            try:
                server.auth(_id, otp)
                return True
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed as a wrong password.
                window.show_message(_('Incorrect password'))
                pw.setText('')
|
hook.py | import copy
import json
import logging
import threading
from functools import wraps
from platypush.config import Config
from platypush.message.event import Event
from platypush.message.request import Request
from platypush.procedure import Procedure
from platypush.utils import get_event_class_by_type, set_thread_name, is_functional_hook
logger = logging.getLogger('platypush')
def parse(msg):
    """ Builds a dict given another dictionary or
    a JSON UTF-8 encoded string/bytearray.

    Non-string/bytes input is returned unchanged; invalid JSON is logged
    and None is returned. """
    # Idiom fix: single isinstance call with a tuple of types.
    if isinstance(msg, (bytes, bytearray)):
        msg = msg.decode('utf-8')
    if isinstance(msg, str):
        try:
            msg = json.loads(msg.strip())
        except json.JSONDecodeError:
            logger.warning('Invalid JSON message: {}'.format(msg))
            return None

    return msg
class EventCondition(object):
    """ Event hook condition class """

    def __init__(self, type=Event.__class__, priority=None, **kwargs):
        """
        Rule constructor.
        Params:
            type -- Class of the event to be built
            kwargs -- Fields rules as a key-value (e.g. source_button=btn_id
                or recognized_phrase='Your phrase')
        """
        self.type = type
        self.priority = priority
        self.parsed_args = {}
        # TODO So far we only allow simple value match. If value is a dict
        # instead, we should allow more a sophisticated attribute matching,
        # e.g. or conditions, in, and other operators.
        self.args = dict(kwargs)

    @classmethod
    def build(cls, rule):
        """ Builds a rule given either another EventRule, a dictionary or
        a JSON UTF-8 encoded string/bytearray """
        if isinstance(rule, cls):
            return rule

        rule = parse(rule)
        assert isinstance(rule, dict)

        # Remaining keys become field-match rules for the condition.
        event_type = get_event_class_by_type(rule.pop('type', 'Event'))
        return cls(type=event_type, **rule)
class EventAction(Request):
    """ Event hook action class. It is a special type of runnable request
    whose fields can be configured later depending on the event context """

    def __init__(self, target=None, action=None, **args):
        # Default the target to this device; deep-copy args so later
        # event-context substitutions don't mutate the caller's dict.
        super().__init__(
            target=target if target is not None else Config.get('device_id'),
            action=action,
            **copy.deepcopy(args))

    @classmethod
    def build(cls, action):
        """Build an EventAction from a dict/JSON payload, stamping origin,
        target and auth token from the local configuration."""
        action = super().parse(action)
        origin = Config.get('device_id')
        action['origin'] = origin
        action.setdefault('target', origin)

        token = Config.get('token')
        if token:
            action['token'] = token

        return super().build(action)
class EventHook(object):
    """ Event hook class. It consists of one conditions and
    one or multiple actions to be executed """

    def __init__(self, name, priority=None, condition=None, actions=None):
        """ Constructor. Takes a name, a EventCondition object and an event action
        procedure as input. It may also have a priority attached
        as a positive number. If multiple hooks match against an event,
        only the ones that have either the maximum match score or the
        maximum pre-configured priority will be run. """
        self.name = name
        self.condition = EventCondition.build(condition or {})
        self.actions = actions or []
        self.priority = priority or 0
        self.condition.priority = self.priority

    @classmethod
    def build(cls, name, hook):
        """ Builds a rule given either another EventRule, a dictionary or
        a JSON UTF-8 encoded string/bytearray """
        if isinstance(hook, cls):
            return hook
        else:
            hook = parse(hook)

        if is_functional_hook(hook):
            actions = Procedure(name=name, requests=[hook], _async=False)
            return cls(name=name, condition=hook.condition, actions=actions)

        assert isinstance(hook, dict)
        condition = EventCondition.build(hook['if']) if 'if' in hook else None
        actions = []
        priority = hook['priority'] if 'priority' in hook else None
        # Bug fix: hooks without an 'if' clause have no condition here, so
        # guard against setting .priority on None (it used to raise
        # AttributeError). __init__ re-applies the priority to the built
        # condition anyway.
        if condition is not None:
            condition.priority = priority

        if 'then' in hook:
            if isinstance(hook['then'], list):
                actions = hook['then']
            else:
                actions = [hook['then']]

        actions = Procedure.build(name=name + '__Hook', requests=actions, _async=False)
        return cls(name=name, condition=condition, actions=actions, priority=priority)

    def matches_event(self, event):
        """ Returns an EventMatchResult object containing the information
        about the match between the event and this hook """
        return event.matches_condition(self.condition)

    def run(self, event):
        """ Checks the condition of the hook against a particular event and
        runs the hook actions if the condition is met """
        def _thread_func(result):
            set_thread_name('Event-' + self.name)
            self.actions.execute(event=event, **result.parsed_args)

        result = self.matches_event(event)

        if result.is_match:
            logger.info('Running hook {} triggered by an event'.format(self.name))
            # Actions run on their own thread so a slow hook can't block
            # event dispatching.
            threading.Thread(target=_thread_func, name='Event-' + self.name, args=(result,)).start()
def hook(event_type=Event, **condition):
    """Decorator that turns a function into a functional event hook, tagging
    it with an EventCondition built from *event_type* and *condition*."""
    def wrapper(f):
        f.hook = True
        f.condition = EventCondition(type=event_type, **condition)

        @wraps(f)
        def wrapped(*args, **kwargs):
            from platypush import Response
            try:
                outcome = f(*args, **kwargs)
                # Pass Response objects through; wrap anything else.
                return outcome if isinstance(outcome, Response) else Response(output=outcome)
            except Exception as e:
                logger.exception(e)
                return Response(errors=[str(e)])

        return wrapped

    return wrapper
# vim:sw=4:ts=4:et:
|
main.py | # -*- coding: utf-8 -*-
import logging
import multiprocessing
import os
import time
import wave
from multiprocessing import set_start_method
from multiprocessing.queues import Queue
from typing import Optional
import uvicorn
from fastapi import Cookie, Depends, FastAPI, Query, WebSocket, status, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from starlette.responses import FileResponse
# Root logger setup: timestamped debug output for the whole app.
# NOTE(review): `format` shadows the builtin of the same name at module level.
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.DEBUG,
                    datefmt="%H:%M:%S")

app = FastAPI()
# Static files and Jinja templates are resolved relative to this file.
root = os.path.dirname(__file__)
app.mount('/static', StaticFiles(directory=os.path.join(root, 'static')), name='static')
templates = Jinja2Templates(directory=os.path.join(root, 'templates'))


@app.get('/favicon.ico')
async def get():
    """Serve the favicon from the static directory."""
    return FileResponse(os.path.join(root, 'static', 'favicon.ico'))


# NOTE(review): this handler reuses the name `get`, shadowing the one above
# at module level; FastAPI registered the first route at decoration time so
# both URLs still work, but distinct function names would be clearer.
@app.get("/")
async def get(request: Request):
    """Render the index page."""
    return templates.TemplateResponse('index.html', {'request': request})
async def get_cookie_or_token(
    websocket: WebSocket,
    session: Optional[str] = Cookie(None),
    token: Optional[str] = Query(None),
):
    """FastAPI dependency: return the `session` cookie or the `token` query
    parameter, preferring the cookie.

    When neither is present, closes the websocket with a policy-violation
    code (and then returns None).
    """
    if session is None and token is None:
        await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
    return session or token
def wav_worker(q: Queue, uid: str):
    """Worker process body: drain *q* and append each chunk of raw PCM bytes
    to a timestamped mono 16 kHz / 16-bit WAV file under ./upload_waves.

    Runs until an exception occurs (e.g. the owning process terminates it or
    a bad payload arrives); the file is closed in all cases.
    """
    root = os.path.join(os.path.dirname(__file__), 'upload_waves')
    os.makedirs(root, exist_ok=True)
    filename = os.path.join(root, f'{uid}_{time.time()}.wav')
    # Bug fix: initialize `wav` before the try block. Previously, if
    # wave.open() raised, the finally clause hit an unbound `wav` and the
    # UnboundLocalError masked the original error.
    wav = None
    try:
        wav = wave.open(filename, mode='wb')
        wav.setframerate(16000)
        wav.setnchannels(1)
        wav.setsampwidth(2)
        while True:
            data_bytes = q.get()
            wav.writeframes(data_bytes)
            print(f'q.get {len(data_bytes)}')
    except Exception as e:
        logging.debug(e)
    finally:
        if wav is not None:
            wav.close()
        logging.info('leave wav_worker')
@app.websocket("/items/{item_id}/ws")
async def websocket_endpoint(
    websocket: WebSocket,
    item_id: str,
    q: Optional[int] = None,
    cookie_or_token: str = Depends(get_cookie_or_token),
):
    """Receive binary audio chunks over the websocket and hand them to a
    wav-writing worker process via a multiprocessing queue.

    One worker process is spawned per connection and terminated when the
    socket closes.
    """
    await websocket.accept()
    logging.info('websocket.accept')
    ctx = multiprocessing.get_context()
    queue = ctx.Queue()
    process = ctx.Process(target=wav_worker, args=(queue, item_id))
    process.start()
    counter = 0
    try:
        while True:
            data_bytes = await websocket.receive_bytes()
            # Decode little-endian signed 16-bit samples.
            # NOTE(review): `data` is currently unused (the send below is
            # commented out) — presumably kept for debugging; confirm before
            # removing.
            data = [int.from_bytes(data_bytes[i:i + 2], byteorder='little', signed=True) for i in
                    range(0, len(data_bytes), 2)]
            await websocket.send_text(
                f"Session cookie or query token value is: {cookie_or_token}. counter {counter}"
            )
            if q is not None:
                await websocket.send_text(f"Query parameter q is: {q}")
            # await websocket.send_text(f"Message text was: {data}, for item ID: {item_id}")
            queue.put(data_bytes)
            counter += 1
    except Exception as e:
        logging.debug(e)
    finally:
        # Wait for the worker to finish
        queue.close()
        queue.join_thread()
        # use terminate so the while True loop in process will exit
        process.terminate()
        process.join()
        logging.info('leave websocket_endpoint')
if __name__ == '__main__':
    # When using spawn you should guard the part that launches the job in if __name__ == '__main__':.
    # `set_start_method` should also go there, and everything will run fine.
    try:
        set_start_method('spawn')
    except RuntimeError as e:
        # The start method can only be set once per process (e.g. when
        # uvicorn's --reload re-imports this module).
        print(e)
    # Serve over HTTPS; TLS key/cert are expected next to this file.
    uvicorn.run('main:app', host='0.0.0.0', reload=True, log_level='debug',
                ssl_keyfile=os.path.join(root, 'key.pem'),
                ssl_certfile=os.path.join(root, 'cert.pem'))
|
compare_WalternatingCNOT_qng.py | import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.nqubit, qtm.fubini_study, qtm.encoding
import multiprocessing
def run_walternating(num_layers, num_qubits):
    """Tomography training for the W-alternating CNOT ansatz using QNG.

    Optimizes the circuit parameters for 400 iterations against a random
    normalized target state, then saves loss values, parameter history,
    traces and fidelities as CSV files under the experiment directory for
    this (num_layers, num_qubits) pair.
    """
    thetas = np.ones(int(num_layers*num_qubits / 2) + 3 * num_layers * num_qubits)
    # Random normalized target state |psi>.
    psi = 2 * np.random.uniform(0, 2*np.pi, (2**num_qubits))
    psi = psi / np.linalg.norm(psi)
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    qc.initialize(psi, range(num_qubits))

    loss_values = []
    thetass = []
    for i in range(0, 400):
        if i % 20 == 0:
            print('W_alternating: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
        # Quantum natural gradient step: G is the Fubini-Study metric,
        # pinv(G) @ grad preconditions the gradient.
        G = qtm.fubini_study.qng(qc.copy(), thetas, qtm.nqubit.create_WalternatingCNOT_layerd_state, num_layers)
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.nqubit.create_WalternatingCNOT_layerd_state,
            thetas, num_layers = num_layers)
        thetas = np.real(thetas - qtm.constant.learning_rate*(np.linalg.pinv(G) @ grad_loss))
        thetass.append(thetas.copy())
        qc_copy = qtm.nqubit.create_WalternatingCNOT_layerd_state(qc.copy(), thetas, num_layers)
        loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values.append(loss)

    traces = []
    fidelities = []
    for thetas in thetass:
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.nqubit.create_WalternatingCNOT_layerd_state(qc, thetas, num_layers = num_layers).inverse()
        psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces.append(trace)
        fidelities.append(fidelity)

    print('Writting ... ' + str(num_layers) + ' layers,' + str(num_qubits) +
          ' qubits')
    # DRY fix: build the per-experiment output directory once instead of
    # repeating the same path expression for every CSV file.
    out_dir = ("../../experiments/tomographyCNOT/tomography_walternating_" +
               str(num_layers) + "/" + str(num_qubits) + "/")
    for fname, data in (('loss_values_qng.csv', loss_values),
                        ('thetass_qng.csv', thetass),
                        ('traces_qng.csv', traces),
                        ('fidelities_qng.csv', fidelities)):
        np.savetxt(out_dir + fname, data, delimiter=",")
if __name__ == "__main__":
    # One worker process per (layers, qubits) configuration.
    num_layers = [1, 2, 3, 4, 5]
    num_qubits = [3, 4, 5]
    workers = [
        multiprocessing.Process(target=run_walternating, args=(i, j))
        for i in num_layers
        for j in num_qubits
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Done!")
test_dbus.py | #!/usr/bin/python
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import tempfile
import time
from multiprocessing import Process
from mock_server import setup_mock_server
from utils.common import cleanup_mender_state
@pytest.mark.usefixtures("setup_board", "bitbake_path")
@pytest.mark.not_for_machine("vexpress-qemu-flash")
class TestDBus:
    """Integration tests for the Mender client's D-Bus authentication API.

    All tests drive a real client on the target board through `connection`
    fixtures and clean up client state afterwards.
    """

    # this is a portion of the JWT token served by the Mender mock server:
    # see: meta-mender-ci/recipes-testing/mender-mock-server/mender-mock-server.py
    JWT_TOKEN = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9."

    @pytest.mark.min_mender_version("2.5.0")
    def test_dbus_system_configuration_file(self, bitbake_variables, connection):
        """Test that D-Bus configuration file is correctly installed."""
        output = connection.run(
            "cat /usr/share/dbus-1/system.d/io.mender.AuthenticationManager.conf"
        ).stdout.strip()
        assert "io.mender.AuthenticationManager" in output

    @pytest.mark.min_mender_version("2.5.0")
    def test_dbus_non_root_access(self, bitbake_variables, connection):
        """Test that only root user can access Mender DBus API."""

        # This is the command that is expected to fail for non-root user
        dbus_send_command = "dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken"

        try:
            connection.run("mender bootstrap", warn=True)
            connection.run("systemctl start mender-client")

            # Wait one state machine cycle for the D-Bus API to be available
            for _ in range(12):
                result = connection.run("journalctl -u mender-client")
                if "Authorize failed:" in result.stdout:
                    break
                time.sleep(5)
            else:
                pytest.fail("failed to detect a full state machine cycle")

            # Root can call the API...
            result = connection.run(dbus_send_command)
            assert "string" in result.stdout, result.stdout

            # ...but an unprivileged user is denied by the D-Bus policy.
            result = connection.run(
                "sudo -u mender-ci-tester %s" % dbus_send_command, warn=True
            )
            assert result.exited == 1
            assert (
                "Error org.freedesktop.DBus.Error.AccessDenied" in result.stderr
            ), result.stderr

        finally:
            connection.run("systemctl stop mender-client")
            cleanup_mender_state(connection)

    @pytest.mark.min_mender_version("2.5.0")
    def test_dbus_get_jwt_token(self, bitbake_variables, connection, setup_mock_server):
        """Test the JWT token can be retrieved using D-Bus."""
        try:
            # bootstrap the client
            result = connection.run("mender bootstrap --forcebootstrap")
            assert result.exited == 0

            # start the mender-client service
            result = connection.run("systemctl start mender-client")
            assert result.exited == 0

            # get the JWT token via D-Bus
            # Poll: authorization against the mock server may take a while.
            output = ""
            for i in range(12):
                result = connection.run(
                    "dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken || true"
                )
                if self.JWT_TOKEN in result.stdout:
                    output = result.stdout
                    break
                time.sleep(5)
            assert f'string "{self.JWT_TOKEN}' in output
        finally:
            connection.run("systemctl stop mender-client")
            cleanup_mender_state(connection)

    @pytest.mark.min_mender_version("2.5.0")
    def test_dbus_fetch_jwt_token(
        self, bitbake_variables, connection, second_connection, setup_mock_server
    ):
        """Test the JWT token can be fetched using D-Bus."""
        # bootstrap the client
        result = connection.run("mender bootstrap --forcebootstrap")
        assert result.exited == 0

        try:
            # start monitoring the D-Bus
            # A second SSH session tails the bus so we can later assert that
            # the JwtTokenStateChange signal was emitted.
            def dbus_monitor():
                second_connection.run(
                    "dbus-monitor --system \"type='signal',interface='io.mender.Authentication1'\" > /tmp/dbus-monitor.log"
                )

            p = Process(target=dbus_monitor, daemon=True)
            p.start()

            # get the JWT token via D-Bus
            try:
                # start the mender-client service
                result = connection.run("systemctl start mender-client")
                assert result.exited == 0

                # fetch the JWT token
                fetched = False
                for i in range(12):
                    result = connection.run(
                        "dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.FetchJwtToken || true"
                    )
                    if "true" in result.stdout:
                        fetched = True
                        break
                    time.sleep(5)

                # fetch was successful
                assert fetched

                # verify we received the D-Bus signal JwtTokenStateChange and that it contains the JWT token
                found = False
                output = ""
                for i in range(12):
                    output = connection.run("cat /tmp/dbus-monitor.log").stdout.strip()
                    if (
                        "path=/io/mender/AuthenticationManager; interface=io.mender.Authentication1; member=JwtTokenStateChange"
                        in output
                    ):
                        found = True
                        break
                    time.sleep(5)
                assert found, output

                # token is now available
                result = connection.run(
                    "dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken"
                )
                assert result.exited == 0
                output = result.stdout.strip()
                assert f'string "{self.JWT_TOKEN}' in output
            finally:
                p.terminate()
                connection.run("systemctl stop mender-client")
                connection.run("rm -f /tmp/dbus-monitor.log")
        finally:
            cleanup_mender_state(connection)
|
controller.py | """A pythonic Xbox 360 controller library for Linux.
Partly inspired by the following libraries/codes:
- https://www.freebasic.net/forum/viewtopic.php?t=18068 (libjoyrumble)
- https://gist.github.com/rdb/8864666
"""
import os
import select
import struct
import sys
import time
import warnings
from array import array
from collections import namedtuple
from fcntl import ioctl
from glob import glob
from threading import Thread, Event
from xbox360controller.linux.input import *
from xbox360controller.linux.input_event_codes import *
from xbox360controller.linux.joystick import *
# Warning texts emitted (via warnings.warn) when the LED sysfs file cannot
# be opened for writing — either a permissions problem or a driver that
# does not expose the xpad LED interface at all.
LED_PERMISSION_WARNING = """Permission to the LED sysfs file was denied.
You may run this script as user root or try creating a udev rule containing:
SUBSYSTEM=="leds", RUN+="/bin/chmod 666 /sys/class/leds/%k/brightness"
E.g. in a file /etc/udev/rules.d/xpad.rules
"""
LED_SUPPORT_WARNING = """Setting the LED status is not supported for
this gamepad or its driver.
"""
# Kernel button codes (from input_event_codes) mapped to their symbolic
# names; used in raw mode to create Button attributes named after the code.
BUTTON_NAMES = {
    BTN_A: "BTN_A",
    BTN_B: "BTN_B",
    BTN_X: "BTN_X",
    BTN_Y: "BTN_Y",
    BTN_TL: "BTN_TL",
    BTN_TR: "BTN_TR",
    BTN_SELECT: "BTN_SELECT",
    BTN_START: "BTN_START",
    BTN_MODE: "BTN_MODE",
    BTN_THUMBL: "BTN_THUMBL",
    BTN_THUMBR: "BTN_THUMBR",
}
# Kernel absolute-axis codes mapped to their symbolic names (raw mode).
AXIS_NAMES = {
    ABS_X: "ABS_X",
    ABS_Y: "ABS_Y",
    ABS_Z: "ABS_Z",
    ABS_RX: "ABS_RX",
    ABS_RY: "ABS_RY",
    ABS_RZ: "ABS_RZ",
    ABS_HAT0X: "ABS_HAT0X",
    ABS_HAT0Y: "ABS_HAT0Y",
}
# One decoded joystick event: `time` is an absolute UNIX timestamp,
# `type`/`number`/`value` come straight from the kernel js_event struct,
# and `is_init` marks the synthetic initial-state events the kernel sends
# right after the device is opened.
ControllerEvent = namedtuple("Event", ["time", "type", "number", "value", "is_init"])
def _get_uptime():
with open("/proc/uptime", "r") as f:
uptime_seconds = float(f.readline().split()[0])
return uptime_seconds
BOOT_TIME = time.time() - _get_uptime()
class RawAxis:
    """A one-dimensional controller axis exposing its raw value.

    ``when_moved`` may be set to a callable; ``run_callback`` invokes it
    with the axis itself as the sole argument.
    """

    def __init__(self, name):
        self.name = name
        self._value = 0
        # Optional user callback, fired by run_callback().
        self.when_moved = None

    def __repr__(self):
        return "<xbox360controller.{cls} ({name})>".format(
            cls=self.__class__.__name__, name=self.name
        )

    @property
    def value(self):
        """Current axis position."""
        return self._value

    def run_callback(self):
        """Invoke ``when_moved`` if it is set and callable."""
        callback = self.when_moved
        if callback is not None and callable(callback):
            callback(self)
class Axis:
    """A two-dimensional controller axis (thumb stick or hat).

    Exposes the current position as ``x``/``y``; ``when_moved`` may be set
    to a callable invoked with the axis as the sole argument.
    """

    def __init__(self, name):
        self.name = name
        self._value_x = 0
        self._value_y = 0
        # Optional user callback, fired by run_callback().
        self.when_moved = None

    def __repr__(self):
        return "<xbox360controller.{cls} ({name})>".format(
            cls=self.__class__.__name__, name=self.name
        )

    @property
    def x(self):
        """Current horizontal position."""
        return self._value_x

    @property
    def y(self):
        """Current vertical position."""
        return self._value_y

    def run_callback(self):
        """Invoke ``when_moved`` if it is set and callable."""
        callback = self.when_moved
        if callback is not None and callable(callback):
            callback(self)
class Button:
    """A single controller button with press/release callbacks."""

    def __init__(self, name):
        self.name = name
        self._value = False
        # Optional user callbacks, dispatched by the controller event loop.
        self.when_pressed = None
        self.when_released = None

    def __repr__(self):
        return "<xbox360controller.{cls} ({name})>".format(
            cls=self.__class__.__name__, name=self.name
        )

    @property
    def is_pressed(self):
        """True while the button is held down."""
        return bool(self._value)
class Xbox360Controller:
    """High-level interface to a single Xbox 360 gamepad on Linux.

    Joystick events are read from ``/dev/input/jsN`` on a background
    thread which updates the axis/button objects and dispatches their
    callbacks. Rumble effects are written to the matching
    ``/dev/input/eventN`` device, and the LED ring is driven through the
    xpad driver's sysfs ``brightness`` file.
    """

    # LED status codes understood by the xpad driver:
    # https://github.com/paroj/xpad/blob/a6790a42800661d6bd658e9ba2215c0dc9bb2a44/xpad.c#L1355
    LED_OFF = 0
    LED_BLINK = 1
    LED_TOP_LEFT_BLINK_ON = 2
    LED_TOP_RIGHT_BLINK_ON = 3
    LED_BOTTOM_LEFT_BLINK_ON = 4
    LED_BOTTOM_RIGHT_BLINK_ON = 5
    LED_TOP_LEFT_ON = 6
    LED_TOP_RIGHT_ON = 7
    LED_BOTTOM_LEFT_ON = 8
    LED_BOTTOM_RIGHT_ON = 9
    LED_ROTATE = 10
    LED_BLINK_PREV = 11
    LED_BLINK_SLOW_PREV = 12
    LED_ROTATE_TWO = 13
    LED_BLINK_SLOW = 14
    LED_BLINK_ONCE_PREV = 15

    @classmethod
    def get_available(cls):
        """Return a controller instance for every /dev/input/js* device."""
        return [cls(index) for index in range(len(glob("/dev/input/js*")))]

    def __init__(self, index=0, axis_threshold=0.2, raw_mode=False, event_timeout=1.0):
        """Open the joystick/event devices and start the event thread.

        :param index: number N of the ``/dev/input/jsN`` device to open
        :param axis_threshold: minimum absolute axis value before a
            ``when_moved`` callback fires
        :param raw_mode: expose kernel-named axes/buttons instead of the
            friendly ``axis_l``/``button_a``-style attributes
        :param event_timeout: ``select()`` timeout (seconds) of the event
            loop; this also bounds how long :meth:`close` may block
        """
        self.index = index
        self.axis_threshold = axis_threshold
        self.raw_mode = raw_mode
        self.event_timeout = event_timeout
        self._ff_id = -1  # id of the last uploaded force-feedback effect

        try:
            self._dev_file = open(self._get_dev_file(), "rb")
        except FileNotFoundError:
            raise Exception(
                "controller device with index {index} "
                "was not found!".format(index=index)
            )
        self._event_file = open(self._get_event_file(), "wb")
        # The LED file is optional: missing permissions or an xpad driver
        # without LED support only produce a warning, not a failure.
        self._led_file = None
        try:
            self._led_file = open(self._get_led_file(), "w")
        except PermissionError:
            warnings.warn(LED_PERMISSION_WARNING, UserWarning)
        except FileNotFoundError:
            warnings.warn(LED_SUPPORT_WARNING, UserWarning)

        if raw_mode:
            # Axes/buttons named after the kernel codes reported by ioctl.
            self.axes = self._get_axes()
            self.buttons = self._get_buttons()
        else:
            # Friendly fixed attribute names for the standard 360 layout.
            self.axis_l = Axis("axis_l")
            self.axis_r = Axis("axis_r")
            self.hat = Axis("hat")
            self.trigger_l = RawAxis("trigger_l")
            self.trigger_r = RawAxis("trigger_r")
            self.axes = [
                self.axis_l,
                self.axis_r,
                self.hat,
                self.trigger_l,
                self.trigger_r,
            ]
            self.button_a = Button("button_a")
            self.button_b = Button("button_b")
            self.button_x = Button("button_x")
            self.button_y = Button("button_y")
            self.button_trigger_l = Button("button_trigger_l")
            self.button_trigger_r = Button("button_trigger_r")
            self.button_select = Button("button_select")
            self.button_start = Button("button_start")
            self.button_mode = Button("button_mode")
            self.button_thumb_l = Button("button_thumb_l")
            self.button_thumb_r = Button("button_thumb_r")
            self.buttons = [
                self.button_a,
                self.button_b,
                self.button_x,
                self.button_y,
                self.button_trigger_l,
                self.button_trigger_r,
                self.button_select,
                self.button_start,
                self.button_mode,
                self.button_thumb_l,
                self.button_thumb_r,
            ]

        self._event_thread_stopped = Event()
        self._event_thread = Thread(target=self._event_loop)
        self._event_thread.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def _get_dev_file(self):
        """Path of the joystick device for this controller index."""
        return "/dev/input/js{idx}".format(idx=self.index)

    def _get_event_file(self):
        """Path of the evdev device paired with this joystick (via sysfs)."""
        event_file_sysfs = glob(
            "/sys/class/input/js{idx}/device/event*".format(idx=self.index)
        )[0]
        event_number = int(os.path.basename(event_file_sysfs).replace("event", ""))
        return "/dev/input/event{number}".format(number=event_number)

    def _get_led_file(self):
        """Path of the xpad LED brightness file for this controller."""
        return "/sys/class/leds/xpad{idx}/brightness".format(idx=self.index)

    def _get_axes(self):
        """Build RawAxis objects for every mapped axis (raw mode only)."""
        axes = []
        buf = array("B", [0] * 64)
        ioctl(self._dev_file, JSIOCGAXMAP, buf)
        for axis in buf[: self.num_axes]:
            name = AXIS_NAMES.get(axis)
            if name is not None:
                name = name.lower()
                setattr(self, name, RawAxis(name))
                axes.append(getattr(self, name))
        return axes

    def _get_buttons(self):
        """Build Button objects for every mapped button (raw mode only)."""
        buttons = []
        buf = array("H", [0] * 200)
        ioctl(self._dev_file, JSIOCGBTNMAP, buf)
        for button in buf[: self.num_buttons]:
            name = BUTTON_NAMES.get(button)
            if name is not None:
                name = name.lower()
                setattr(self, name, Button(name))
                buttons.append(getattr(self, name))
        return buttons

    def _event_loop(self):
        """Background thread: read events and dispatch callbacks until
        close() sets the stop flag. Synthetic init events are ignored."""
        while not self._event_thread_stopped.is_set():
            event = self.get_event()
            if event is not None and not event.is_init:
                self.process_event(event)

    def get_event(self):
        """Read one joystick event, or return None on timeout/shutdown.

        Waits at most ``event_timeout`` seconds. The raw 8-byte js_event
        struct is decoded as (time_ms, value, type, number); the millisecond
        timestamp is converted to an absolute UNIX time using BOOT_TIME.
        """
        try:
            r, w, e = select.select([self._dev_file], [], [], self.event_timeout)
            if self._dev_file in r:
                buf = self._dev_file.read(8)
            else:
                return
        except ValueError:
            # File closed in main thread
            return
        else:
            if buf:
                time_, value, type_, number = struct.unpack("IhBB", buf)
                time_ = round(BOOT_TIME + (time_ / 1000), 4)
                is_init = bool(type_ & JS_EVENT_INIT)
                return ControllerEvent(
                    time=time_, type=type_, number=number, value=value, is_init=is_init
                )

    def axis_callback(self, axis, val):
        """Fire ``axis.when_moved`` when |val| exceeds the threshold."""
        if (
            axis.when_moved is not None
            and abs(val) > self.axis_threshold
            and callable(axis.when_moved)
        ):
            axis.when_moved(axis)

    def process_event(self, event):
        """Update axis/button state from one event and run callbacks."""
        if event.type == JS_EVENT_BUTTON:
            # Button numbers 11-14 are the D-pad, mirrored onto the hat
            # axis (left/right on x, up/down on y).
            if event.number >= 11 and event.number <= 14:
                if event.number == 11:
                    self.hat._value_x = -int(event.value)
                    val = self.hat._value_x
                if event.number == 12:
                    self.hat._value_x = int(event.value)
                    val = self.hat._value_x
                if event.number == 13:
                    self.hat._value_y = int(event.value)
                    val = self.hat._value_y
                if event.number == 14:
                    self.hat._value_y = -int(event.value)
                    val = self.hat._value_y
                self.axis_callback(self.hat, val)
            try:
                button = self.buttons[event.number]
            except IndexError:
                # Event for a button we did not map (e.g. D-pad numbers
                # beyond the buttons list) — state was handled above.
                return
            else:
                button._value = event.value
                if (
                    button._value
                    and button.when_pressed is not None
                    and callable(button.when_pressed)
                ):
                    button.when_pressed(button)
                if (
                    not button._value
                    and button.when_released is not None
                    and callable(button.when_released)
                ):
                    button.when_released(button)
        if event.type == JS_EVENT_AXIS:
            if self.raw_mode:
                try:
                    axis = self.axes[event.number]
                except IndexError:
                    return
                else:
                    # Normalize the signed 16-bit kernel value to -1..1.
                    val = event.value / 32767
                    axis._value = val
            else:
                num = event.number
                val = event.value / 32767
                if num == 0:
                    self.axis_l._value_x = val
                if num == 1:
                    self.axis_l._value_y = val
                if num == 2:
                    # Triggers are remapped from -1..1 to 0..1.
                    self.trigger_l._value = (val + 1) / 2
                if num == 3:
                    self.axis_r._value_x = val
                if num == 4:
                    self.axis_r._value_y = val
                if num == 5:
                    self.trigger_r._value = (val + 1) / 2
                if num == 6:
                    self.hat._value_x = int(val)
                if num == 7:
                    self.hat._value_y = int(val * -1)
                axis = [
                    self.axis_l,
                    self.axis_l,
                    self.trigger_l,
                    self.axis_r,
                    self.axis_r,
                    self.trigger_r,
                    self.hat,
                    self.hat,
                ][num]
                self.axis_callback(axis, val)

    @property
    def driver_version(self):
        """Return ((joystick driver version), (evdev driver version))."""
        buf = array("i", [0])
        ioctl(self._dev_file, JSIOCGVERSION, buf)
        version_dev = struct.unpack("i", buf.tobytes())[0]
        version_dev = (version_dev >> 16, (version_dev >> 8) & 0xFF, version_dev & 0xFF)
        buf = array("i", [0])
        ioctl(self._event_file, EVIOCGVERSION, buf)
        version_ev = struct.unpack("i", buf.tobytes())[0]
        version_ev = (version_ev >> 16, (version_ev >> 8) & 0xFF, version_ev & 0xFF)
        return version_dev, version_ev

    @property
    def num_axes(self):
        """Number of axes (queried from the kernel in raw mode)."""
        if self.raw_mode:
            buf = array("B", [0] * 64)
            ioctl(self._dev_file, JSIOCGAXES, buf)
            return int(buf[0])
        else:
            return len(self.axes)

    @property
    def num_buttons(self):
        """Number of buttons (queried from the kernel in raw mode)."""
        if self.raw_mode:
            buf = array("B", [0])
            ioctl(self._dev_file, JSIOCGBUTTONS, buf)
            return int(buf[0])
        else:
            return len(self.buttons)

    @property
    def name(self):
        """Device name as reported by the joystick driver."""
        buf = array("B", [0] * 64)
        ioctl(self._dev_file, JSIOCGNAME(len(buf)), buf)
        # array.tostring() was removed in Python 3.9; tobytes() is the
        # exact replacement. The decoded name keeps trailing NULs, as the
        # original implementation did.
        return buf.tobytes().decode()

    def info(self):
        """Print a human-readable summary of the controller."""
        print("{0} at index {1}".format(self.name, self.index))
        print("Axes: {0}".format(self.num_axes))
        for axis in self.axes:
            print("\t{}".format(axis.name))
        print("Buttons: {0}".format(self.num_buttons))
        for button in self.buttons:
            print("\t{}".format(button.name))
        print("Rumble: {0}".format("yes" if self.has_rumble else "no"))
        print(
            "Driver version: {0}".format(
                " ".join("{}.{}.{}".format(*ver) for ver in self.driver_version)
            )
        )

    @property
    def has_rumble(self):
        """True if the event device advertises the FF_RUMBLE capability."""
        buf = array("L", [0] * 4)
        has_ff = EVIOCGBIT(EV_FF, struct.calcsize("L") * len(buf))
        if ioctl(self._event_file, has_ff, buf) == -1:
            return False
        # Test bit FF_RUMBLE of the capability bitmap: % binds tighter
        # than >>, so this checks bit (FF_RUMBLE mod word-size) of word 1.
        if (buf[1] >> FF_RUMBLE % (struct.calcsize("l") * 8)) & 1:
            return True
        return False

    def set_rumble(self, left, right, duration=1000):
        """Play a rumble effect.

        :param left: strength of the left (strong) motor, 0..1
        :param right: strength of the right (weak) motor, 0..1
        :param duration: effect duration in milliseconds, > 0
        :raises RuntimeError: if the device has no rumble support
        :raises ValueError: on out-of-range arguments
        :returns: True on success, False if a device write failed
        """
        if not self.has_rumble:
            raise RuntimeError("this device doesn't support rumbling")
        if not (1 >= left >= 0 and 1 >= right >= 0):
            raise ValueError("left and right must be in range 0-1")
        if duration <= 0:
            raise ValueError("duration must be greater than 0")
        left_abs = int(left * 65535)
        right_abs = int(right * 65535)
        # Stop any previously-uploaded effect before uploading a new one.
        stop = input_event(EV_FF, self._ff_id, 0)
        if self._event_file.write(stop) == -1:
            return False
        self._event_file.flush()
        effect = ff_effect(FF_RUMBLE, -1, duration, 0, left_abs, right_abs)
        try:
            buf = ioctl(self._event_file, EVIOCSFF, effect)
        except OSError:
            # Heavy usage yields a
            # [Errno 28] No space left on device
            # Simply reset and continue rumbling :)
            self._ff_id = -1
            self._event_file.close()
            self._event_file = open(self._get_event_file(), "wb")
            return self.set_rumble(left, right, duration)
        self._ff_id = int.from_bytes(buf[1:3], "big")
        play = input_event(EV_FF, self._ff_id, 1)
        if self._event_file.write(play) == -1:
            return False
        self._event_file.flush()
        return True

    @property
    def has_led(self):
        """True if the LED brightness file could be opened."""
        return self._led_file is not None

    def set_led(self, status):
        """Write one of the LED_* status codes to the LED ring.

        :raises RuntimeError: if the device/driver has no LED support
        :raises ValueError: if status is outside 0-15
        """
        if not self.has_led:
            raise RuntimeError("setting the LED status is not supported")
        # The original check `0 > status > 15` could never be true, so
        # invalid values were silently passed to the driver.
        if not 0 <= status <= 15:
            raise ValueError("status must be in range 0-15")
        self._led_file.write(str(status))
        self._led_file.flush()

    def close(self):
        """Stop the event thread and release all open device files."""
        # Signal the reader thread first so it leaves its select() loop
        # cleanly (bounded by event_timeout) instead of racing against
        # the closing file descriptors.
        self._event_thread_stopped.set()
        self._event_thread.join()
        self._dev_file.close()
        self._event_file.close()
        if self._led_file is not None:
            self._led_file.close()
|
machine_unit.py | #!/usr/bin/env python
#
# provide host/guest container manager
#
import os
import abc
import time
import shutil
import pathlib
import difflib
import threading
from dataclasses import dataclass
from arkon_config import host_user
from arkon_config import has_ci_azure
from arkon_config import project_repo
from arkon_config import project_boot
from arkon_config import project_data
from sysroot_media import SimpleSysroot
# well known system path
linux_kernel = f"/boot/vmlinuz-linux"
# well known system path
linux_initrd = f"/boot/initramfs-linux.img"
# location of this file
this_dir = os.path.dirname(os.path.abspath(__file__))
class HostAny(abc.ABC):
    """Container manager prototype: drives a single named guest instance."""

    def __init__(self, guest_name:str, sysroot_path:str) -> None:
        # Name of the guest and path to its sysroot disk image.
        self.guest_name = guest_name
        self.sysroot_path = sysroot_path

    @abc.abstractmethod
    def command_initiate(self) -> str:
        "start guest instance"

    @abc.abstractmethod
    def command_terminate(self) -> str:
        "finish guest instance"
# FIXME barely usable
# https://wiki.archlinux.org/index.php/systemd-nspawn#Run_docker_in_systemd-nspawn
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html#System%20Call%20Filtering
# https://github.com/systemd/systemd/blob/master/units/systemd-nspawn%40.service.in
class HostSYSD(HostAny):
    "systemd-nspawn container host"
    def command_initiate(self) -> str:
        """Build the shell command that boots the image under systemd-nspawn.

        The command runs the image rooted at project_data as a named
        container, with the device bind-mounts and DeviceAllow properties
        needed for loop/device-mapper work inside the guest.
        """
        # Extra variables injected via SYSTEMD_PROC_CMDLINE to imitate a
        # kernel command line inside the container.
        proc_cmdline = (
            f"TERM=xterm "
            # "systemd.log_level=debug "
            # "systemd.log_target=console "
            # "systemd.journald.forward_to_console=1 "
        )
        return (
            f"sudo "
            #
            # elevate privilege
            f"SYSTEMD_NSPAWN_LOCK=0 "
            f"SYSTEMD_NSPAWN_USE_CGNS=0 "
            f"SYSTEMD_NSPAWN_API_VFS_WRITABLE=1 "
            #
            f"systemd-nspawn "
            #
            # elevate privilege
            f"--capability=CAP_MKNOD "
            f"--system-call-filter='@mount @keyring @privileged' "
            #
            f"--bind=/dev/disk "
            f"--bind=/dev/block "
            f"--bind=/dev/mapper "
            f"--bind=/dev/loop-control "
            f"--bind=/dev/loop7 " # sysroot.disk
            #
            f"--property='DevicePolicy=auto' "
            f"--property='DeviceAllow=/dev/loop-control rw' "
            f"--property='DeviceAllow=block-loop rw' "
            f"--property='DeviceAllow=block-blkext rw' "
            f"--property='DeviceAllow=/dev/mapper/control rw' "
            f"--property='DeviceAllow=block-device-mapper rw' "
            #
            f"-E SYSTEMD_COLORS=0 " # suppress colors
            f"-E SYSTEMD_IN_INITRD=1 " # imitate initramfs
            f"-E SYSTEMD_PROC_CMDLINE='{proc_cmdline}' " # imitate kernel command line
            f"-D {project_data} " # image folder
            f"-M {self.guest_name} " # container name
            f"/init " # /init is /usr/lib/systemd/systemd
        )
    def command_terminate(self) -> str:
        """Build the shell command that terminates the named container."""
        return (
            f"sudo SYSTEMD_COLORS=0 "
            f"machinectl terminate {self.guest_name} "
        )
# note: ensure virtio drivers are present in the guest
# note: ensure virtio drivers are present in the guest
class HostQEMU(HostAny):
    "qemu container host"

    command = "qemu-system-x86_64"
    # kernel/initrd are taken from the project repository checkout
    kernel = f"{project_repo}{linux_kernel}"
    initrd = f"{project_repo}{linux_initrd}"
    link_addr = "52:54:12:34:56:78"
    # telnet endpoint of the qemu monitor, used by command_action()
    monitor_addr = "127.0.0.1"
    monitor_port = "51234"

    def has_manager(self) -> bool:
        "detect qemu manager present"
        return shutil.which(self.command) is not None

    def has_kernel_kvm(self) -> bool:
        "detect kernel has kvm support"
        return os.path.exists("/dev/kvm")

    def command_action(self, action:str) -> str:
        """Build a shell command sending one monitor action via telnet."""
        # Bug fix: this method referenced the nonexistent attributes
        # self.qemu_monitor_addr / self.qemu_monitor_port, which raised
        # AttributeError at runtime; the class attributes are
        # monitor_addr / monitor_port.
        return (
            f"printf '{action}\n' | telnet {self.monitor_addr} {self.monitor_port} "
        )

    def command_initiate(self) -> str:
        """Build the qemu command line that boots the sysroot image."""
        qemu_cpu_mode = f"-cpu host -enable-kvm " if self.has_kernel_kvm() else ""
        return (
            f"sudo "
            f"{self.command} "
            f"{qemu_cpu_mode} "
            f"-name {self.guest_name} "
            f"-runas {host_user} "
            f"-kernel {self.kernel} "
            f"-initrd {self.initrd} "
            f"-m 512 -smp 2 "
            f"-device e1000,netdev=net0,mac={self.link_addr} "
            f"-netdev user,id=net0,net=192.168.123.0/24,hostfwd=tcp::22022-:22 "
            # TODO
            # f"-device virtio-net,netdev=net1 "
            # f"-netdev tap,id=net1,ifname=QTAP,script=no,downscript=no "
            f"-drive if=virtio,cache=none,format=raw,file={self.sysroot_path} "
            f"-append 'edd=off console=ttyS0 TERM=xterm SYSTEMD_COLORS=0' "
            f"-nographic -serial mon:stdio "
            f"-monitor telnet:{self.monitor_addr}:{self.monitor_port},server,nowait "
        )

    def command_terminate(self) -> str:
        # FIXME use self.guest_name
        return f"sudo killall {self.command}"
class Support:
    """Helpers for masking systemd services inside the guest image."""

    # udev units that must be prohibited before booting the image
    service_path_list_udev = [
        "/etc/systemd/system/systemd-udevd.service",
        "/etc/systemd/system/systemd-udevd-control.socket",
        "/etc/systemd/system/systemd-udevd-kernel.socket",
    ]

    @classmethod
    def service_mask(cls, service_path:str) -> None:
        """Mask one service by linking its unit path to /dev/null."""
        print(f"### service: prohibit {service_path}")
        os.system(f"sudo ln -s /dev/null {project_data}/{service_path}")

    @classmethod
    def service_mask_list(cls, service_path_list:list) -> None:
        """Mask every service path in the given list."""
        for entry in service_path_list:
            cls.service_mask(entry)
@dataclass
class MachineUnit:
    "container host/guest operator"
    machine:str # container name
    image_root:str # absolute path to resource folder

    def __post_init__(self):
        if has_ci_azure():
            print(f"### settle machine state")
            time.sleep(3)
        self.sysroot = SimpleSysroot()
        # Both hosts operate on the boot container and the same sysroot disk.
        self.host_qemu = HostQEMU(self.booter_machine, self.sysroot.disk_path)
        self.host_sysd = HostSYSD(self.booter_machine, self.sysroot.disk_path)

    @property
    def test_base(self) -> str:
        "location of test resources"
        return f"{self.image_root}/test_base"

    # https://www.freedesktop.org/software/systemd/man/systemd-run.html
    def run(self, command:str, machine:str=None) -> None:
        "invoke command inside machine"
        if machine is None:
            machine = self.machine
        invoke = f"sudo systemd-run --wait -G -P -M {machine} {command}"
        result = os.system(invoke)
        assert result == 0, f"result: {result}, command: {command}"

    def report_machine(self) -> None:
        """List all machines known to machinectl."""
        print(f"### report active machines")
        os.system(f"sudo machinectl --all --full")

    def install_this_tool(self) -> None:
        """Run the in-guest manual installer for systemd-tool."""
        print(f"### install systemd-tool")
        self.run(f"/repo/tool/module/manual-install.sh")

    def service_enable(self, service:str) -> None:
        print(f"### service enable : {service}")
        self.run(f"/usr/bin/systemctl enable {service}")

    def service_disable(self, service:str) -> None:
        print(f"### service disable: {service}")
        self.run(f"/usr/bin/systemctl disable {service}")

    def service_mask(self, service:str) -> None:
        print(f"### service mask : {service}")
        self.run(f"/usr/bin/systemctl mask {service}")

    def service_unmask(self, service:str) -> None:
        print(f"### service unmask: {service}")
        self.run(f"/usr/bin/systemctl unmask {service}")

    def service_enable_list(self, service_list:list) -> None:
        for service in service_list:
            self.service_enable(service)

    def service_disable_list(self, service_list:list) -> None:
        for service in service_list:
            self.service_disable(service)

    def share_folder_clear(self):
        """Remove everything from the shared boot/data folders."""
        print(f"### share folder clear")
        os.system(f"sudo rm -rf {project_boot}/*")
        os.system(f"sudo rm -rf {project_data}/*")

    def share_folder_review(self):
        """List the contents of the shared boot/data folders."""
        print(f"### share folder review")
        os.system(f"ls -las {project_boot}")
        os.system(f"ls -las {project_data}")

    def initrd_image_produce(self):
        """Build the initramfs inside the guest."""
        print(f"### produce machine initrd")
        self.run(f"/usr/bin/mkinitcpio -p linux")

    def initrd_image_extract(self):
        """Copy kernel+initrd out of the guest and unpack the initramfs."""
        print(f"### extract machine initrd")
        self.run(f"/usr/bin/cp -f {linux_kernel} {linux_initrd} /repo/boot/")
        self.run(f"/usr/bin/bash -c 'cd /repo/data; lsinitcpio -x {linux_initrd}' ")
        # Hand ownership of the extracted tree back to the host user.
        os.system(f"sudo chown -R {host_user}:{host_user} {project_boot} ")
        os.system(f"sudo chown -R {host_user}:{host_user} {project_data} ")

    def perform_make_boot(self) -> None:
        "produce boot image extract on the host"
        self.report_machine()
        self.share_folder_clear()
        self.initrd_image_produce()
        self.initrd_image_extract()
        self.share_folder_review()

    def assert_has_link(self, path:str) -> None:
        """Fail unless `path` inside the extracted image is a symlink."""
        print(f"### assert link present: {path}")
        full_path = f"{project_data}/{path}"
        assert pathlib.Path(full_path).is_symlink(), f"no link: {full_path}"

    def assert_has_path(self, path:str) -> None:
        """Fail unless `path` inside the extracted image exists."""
        print(f"### assert path present: {path}")
        full_path = f"{project_data}/{path}"
        assert pathlib.Path(full_path).exists(), f"no path: {full_path}"

    def assert_has_text(self, path:str) -> None:
        """Fail unless the extracted file matches the reference copy,
        reporting a unified diff on mismatch."""
        print(f"### assert text matches: {path}")
        boot_path = f"{project_data}/{path}"
        test_path = f"{self.test_base}/{path}"
        boot_list = pathlib.Path(boot_path).read_text().split("\n")
        test_list = pathlib.Path(test_path).read_text().split("\n")
        diff_list = difflib.unified_diff(test_list, boot_list, lineterm='')
        diff_text = "\n".join(diff_list)
        assert boot_list == test_list, f"no match:\n{diff_text}\n"

    def assert_has_link_list(self, path_list:list) -> None:
        for path in path_list:
            self.assert_has_link(path)

    def assert_has_path_list(self, path_list:list) -> None:
        for path in path_list:
            self.assert_has_path(path)

    def assert_has_text_list(self, path_list:list) -> None:
        for path in path_list:
            self.assert_has_text(path)

    @property
    def booter_machine(self) -> str:
        "name of boot container instance"
        return f"{self.machine}-boot"

    def booter_run(self, command:str) -> None:
        "invoke command inside booter"
        self.run(command, self.booter_machine)

    def booter_disable_udev(self) -> None:
        print(f"### booter: disable udev")
        Support.service_mask_list(Support.service_path_list_udev)

    def booter_ensure_loop(self):
        """Make sure the loop module is loaded on the host."""
        print(f"### booter: ensure loop")
        os.system("sudo modprobe loop")
        os.system("sudo losetup")

    def booter_sysd_prepare(self):
        print(f"### booter: sysd: prepare")
        self.booter_ensure_loop()
        self.booter_disable_udev()

    def booter_initiate_thread(self, command:str) -> None:
        "start process in parallel"
        print(command)
        def booter_task() -> None:
            try:
                print(f"### booter start : {self.booter_machine}")
                os.system(command)
            finally:
                print(f"### booter finish : {self.booter_machine}")
        booter_thread = threading.Thread(target=booter_task)
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # daemon attribute before start() instead.
        booter_thread.daemon = True
        booter_thread.start()

    def booter_sysd_initiate(self) -> None:
        self.booter_sysd_prepare()
        print(f"### initrd image: sysd: activate")
        command = self.host_sysd.command_initiate()
        self.booter_initiate_thread(command)

    # manual stop: keyboard terminate: CTRL+]]]
    def booter_sysd_terminate(self) -> None:
        print()
        print(f"### initrd image: sysd: deactivate")
        command = self.host_sysd.command_terminate()
        os.system(command)

    # FIXME "Failed to create bus connection: Protocol error"
    def booter_report_generator(self) -> None:
        print(f"### report generated units")
        self.booter_run(f"/usr/lib/systemd/system-generators/systemd-cryptsetup-generator")
        self.booter_run(f"/usr/lib/systemd/system-generators/systemd-fstab-generator")
        time.sleep(1)
        self.booter_run(f"/usr/bin/ls -las /run/systemd/generator")

    def booter_qemu_action(self, action:str) -> None:
        """Send one qemu monitor action (e.g. quit) via telnet."""
        print(f"### initrd image: action: {action}")
        command = self.host_qemu.command_action(action)
        print(command)
        os.system(command)

    def booter_qemu_initiate(self) -> None:
        # Silently skip when qemu is not installed on the host.
        if not self.host_qemu.has_manager():
            return
        print()
        self.sysroot.produce_media()
        print(f"### initrd image: qemu: activate")
        # TODO
        # os.system(f"{this_dir}/qemu-tap-activate.sh")
        command = self.host_qemu.command_initiate()
        self.booter_initiate_thread(command)

    # manual stop: keyboard terminate: CTRL+A then X
    def booter_qemu_terminate(self) -> None:
        if not self.host_qemu.has_manager():
            return
        print()
        print(f"### initrd image: qemu: deactivate")
        command = self.host_qemu.command_terminate()
        os.system(command)
        # TODO
        # os.system(f"{this_dir}/qemu-tap-deactivate.sh")
|
wikiConverter.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 18:56:24 2019
@author: descentis
"""
import os
from multiprocessing import Process, Lock
import time
import numpy as np
import glob
import difflib
import xml.etree.ElementTree as ET
import math
import textwrap
import html
import requests
import io
class wikiConverter(object):
instance_id = 1
def indent(self,elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
@staticmethod
def wiki_file_writer(elem,myFile,prefix):
global instance_id
t = '\t'
Instance = t+t+"<Instance "
for ch_elem in elem:
if(('id' in ch_elem.tag) and ('parentid' not in ch_elem.tag)):
Instance = Instance+ "Id="+'"'+str(wikiConverter.instance_id)+'"'+" InstanceType="+'"'+"Revision/Wiki"+'"'+" RevisionId="+ '"'+str(ch_elem.text)+'"'+">\n"
myFile.write(Instance)
'''
RevisionId = t+t+t+"<RevisionId>"+ch_elem.text+"</RevisionId>\n"
myFile.write(RevisionId)
'''
'''
if(ch_elem.tag==prefix+'parentid'):
ParentId = t+t+t+"<ParentId>"+ch_elem.text+"</ParentId>\n"
myFile.write(ParentId)
'''
'''
Timestamp Information
'''
if('timestamp' in ch_elem.tag):
'''
if(f_p!=1):
Instance = Instance+" InstanceType= "+'"'+"wiki/text"+'"'+">\n"
myFile.write(Instance)
'''
Timestamp = t+t+t+"<TimeStamp>\n"
myFile.write(Timestamp)
CreationDate = t+t+t+t+"<CreationDate>"+ch_elem.text[:-1]+'.0'+"</CreationDate>\n"
myFile.write(CreationDate)
Timestamp = t+t+t+"</TimeStamp>\n"
myFile.write(Timestamp)
'''
Contributors information
'''
if('contributor' in ch_elem.tag):
Contributors = t+t+t+"<Contributors>\n"
myFile.write(Contributors)
for contrib in ch_elem:
if('ip' in contrib.tag):
LastEditorUserName = t+t+t+t+"<OwnerUserName>"+html.escape(contrib.text)+"</OwnerUserName>\n"
myFile.write(LastEditorUserName)
else:
if('username' in contrib.tag):
LastEditorUserName = t+t+t+t+"<OwnerUserName>"+html.escape(contrib.text)+"</OwnerUserName>\n"
myFile.write(LastEditorUserName)
if(('id' in contrib.tag) and ('parentid' not in contrib.tag)):
LastEditorUserId = t+t+t+t+"<OwnerUserId>"+contrib.text+"</OwnerUserId>\n"
myFile.write(LastEditorUserId)
Contributors = t+t+t+"</Contributors>\n"
myFile.write(Contributors)
'''
Body/Text Information
'''
if('text' in ch_elem.tag):
Body = t+t+t+"<Body>\n"
myFile.write(Body)
if(ch_elem.attrib.get('bytes')!=None):
text_field = t+t+t+t+"<Text Type="+'"'+"wiki/text"+'"'+" Bytes="+'"'+ch_elem.attrib['bytes']+'">\n'
elif(ch_elem.text != None):
text_field = t+t+t+t+"<Text Type="+'"'+"wiki/text"+'"'+" Bytes="+'"'+str(len(ch_elem.text))+'">\n'
else:
text_field = t+t+t+t+"<Text Type="+'"'+"wiki/text"+'"'+" Bytes="+'"'+str(0)+'">\n'
myFile.write(text_field)
if(ch_elem.text == None):
text_body = "";
else:
text_body = textwrap.indent(text=ch_elem.text, prefix=t+t+t+t+t)
text_body = html.escape(text_body)
Body_text = text_body+"\n"
myFile.write(Body_text)
text_field = t+t+t+t+"</Text>\n"
myFile.write(text_field)
Body = t+t+t+"</Body>\n"
myFile.write(Body)
if('comment' in ch_elem.tag):
Edit = t+t+t+"<EditDetails>\n"
myFile.write(Edit)
if(ch_elem.text == None):
text_body = "";
else:
text_body = textwrap.indent(text=ch_elem.text, prefix=t+t+t+t+t)
text_body = html.escape(text_body)
EditType = t+t+t+t+"<EditType>\n"+text_body+"\n"+t+t+t+t+"</EditType>\n"
#Body_text = text_body+"\n"
myFile.write(EditType)
Edit = t+t+t+"</EditDetails>\n"
myFile.write(Edit)
if('sha1' in ch_elem.tag):
sha = ch_elem.text
if(type(sha)!=type(None)):
shaText = t+t+t+'<data key="sha">'+sha+'</data>\n'
myFile.write(shaText)
else:
shaText = ''
Instance = t+t+"</Instance>\n"
myFile.write(Instance)
wikiConverter.instance_id+=1
@staticmethod
def wiki_knml_converter(name):
#global instance_id
#Creating a meta file for the wiki article
# To get an iterable for wiki file
file_name = name
context_wiki = ET.iterparse(file_name, events=("start","end"))
# Turning it into an iterator
context_wiki = iter(context_wiki)
# getting the root element
event_wiki, root_wiki = next(context_wiki)
file_name = name[:-4]+'.knolml'
file_path = file_name
with open(file_path,"w",encoding='utf-8') as myFile:
myFile.write("<?xml version='1.0' encoding='utf-8'?>\n")
myFile.write("<KnolML>\n")
myFile.write('<key attr.name="sha" attrib.type="string" for="Instance" id="sha"/>\n')
prefix = '{http://www.mediawiki.org/xml/export-0.10/}' #In case of Wikipedia, prefic is required
f = 0
title_text = ''
for event, elem in context_wiki:
if event == "end" and 'id' in elem.tag:
if(f==0):
with open(file_path,"a",encoding='utf-8') as myFile:
myFile.write("\t<KnowledgeData "+"Type="+'"'+"Wiki/text/revision"+'"'+" Id="+'"'+elem.text+'"'+">\n")
f=1
if event == "end" and 'title' in elem.tag:
title_text = elem.text
if(f==1 and title_text!=None):
Title = "\t\t<Title>"+title_text+"</Title>\n"
with open(file_path,"a",encoding='utf-8') as myFile:
myFile.write(Title)
title_text = None
if event == "end" and 'revision' in elem.tag:
with open(file_path,"a",encoding='utf-8') as myFile:
wikiConverter.wiki_file_writer(elem,myFile,prefix)
elem.clear()
root_wiki.clear()
with open(file_path,"a",encoding='utf-8') as myFile:
myFile.write("\t</KnowledgeData>\n")
myFile.write("</KnolML>\n")
wikiConverter.instance_id = 1
@staticmethod
def is_number(s):
try:
int(s)
return True
except ValueError:
return False
@staticmethod
def encode(str1, str2):
output = ""
s = [x.replace("\n", "`").replace("-", "^") for x in str1.split(" ")]
s2 = [x.replace("\n", "`").replace("-", "^") for x in str2.split(" ")]
i = 0
while(True):
if i == len(s):
break;
if s[i].isspace() or s[i] == '':
del s[i]
else:
i += 1
i = 0
while(True):
if i == len(s2):
break;
if s2[i].isspace() or s2[i] == '':
del s2[i]
else:
i += 1
d = difflib.Differ()
result = list(d.compare(s, s2))
pos = 0
neg = 0
for x in result:
if x[0] == " ":
pos += 1
if neg != 0:
output += "-"+str(neg)+" "
neg = 0
elif x[0] == "-":
neg += 1
if pos != 0:
output += str(pos)+" "
pos = 0
elif x[0] != "?":
if pos != 0:
output += str(pos)+" "
pos = 0
if neg != 0:
output += "-"+str(neg)+" "
neg = 0
if wikiConverter.is_number(x[2:]):
output += "'"+x[2:]+"' "
else:
output += x[2:]+" "
if pos != 0:
output += str(pos)+" "
if neg != 0:
output += "-"+str(neg)+" "
return output.replace("\t\t\t", "")
#Main function
@staticmethod
def compress(file_name, directory):
# file_name = input("Enter path of KML file:")
tree = ET.parse(file_name)
r = tree.getroot()
for child in r:
if('KnowledgeData' in child.tag):
root = child
last_rev = ""
length = len(root.findall('Instance'))
print(length, "revisions found")
count = 0
intervalLength = int((math.log(length)) ** 2);
# Keep the Orginal text after every 'm' revisions
m = intervalLength+1
for each in root.iter('Text'):
count += 1
if m != intervalLength+1:
current_str = each.text
each.text = wikiConverter.encode(prev_str, current_str)
prev_str = current_str
# print("Revision ", count, " written")
m = m - 1
if m == 0:
m = intervalLength+1
else:
prev_str = each.text
# print("Revision ", count, " written")
m = m - 1
continue
print("KNML file created")
# Creating directory
if not os.path.exists(directory):
os.mkdir(directory)
# Changing file path to include directory
file_name = file_name.split('/')
file_name = directory+'/'+file_name[-1]
'''
file_name.insert(-1, directory)
separator = '/'
file_name = separator.join(file_name)
'''
tree.write(file_name[:-7]+'Compressed.knolml')
f = open(file_name[:-7]+'Compressed.knolml')
f_str = f.read()
f.close()
f2 = open(file_name[:-7]+'Compressed.knolml', "w")
f2.write("<?xml version='1.0' encoding='utf-8'?>\n"+f_str)
f2.close()
@staticmethod
def wikiConvert(*args, **kwargs):
if(kwargs.get('output_dir')!=None):
output_dir = kwargs['output_dir']
if(kwargs.get('file_name')!=None):
file_name = kwargs['file_name']
wikiConverter.wiki_knml_converter(file_name)
file_name = file_name[:-4] + '.knolml'
wikiConverter.compress(file_name,output_dir)
os.remove(file_name)
if(kwargs.get('file_list')!=None):
path_list = kwargs['file_list']
for file_name in path_list:
wikiConverter.wiki_knml_converter(file_name)
file_name = file_name[:-4] + '.knolml'
wikiConverter.compress(file_name,output_dir)
os.remove(file_name)
if((kwargs.get('file_name')==None) and (kwargs.get('file_list')==None)):
print("No arguments provided")
def returnList(self, l, n):
for i in range(0,len(l),n):
yield l[i:i+n]
@staticmethod
def compressAll(dir_path, *args, **kwargs):
t1 = time.time()
if(kwargs.get('c_num')!=None):
c_num = kwargs['c_num']
else:
c_num = 24 # By default it is 24
fileNames = glob.glob(dir_path+'/*.xml')
if(kwargs.get('output_dir')!=None):
output_dir=kwargs['output_dir']
else:
output_dir = os.getcwd()
fileNum = len(fileNames)
fileList = []
if(fileNum<c_num):
for f in fileNames:
fileList.append([f])
else:
f = np.array_split(fileNames,c_num)
for i in f:
fileList.append(i.tolist())
l = Lock()
processDict = {}
if(fileNum<c_num):
pNum = fileNum
else:
pNum = c_num
for i in range(pNum):
processDict[i+1] = Process(target=wikiConverter.wikiConvert,kwargs={'output_dir':output_dir,'file_list': fileList[i],'l': l})
for i in range(pNum):
processDict[i+1].start()
for i in range(pNum):
processDict[i+1].join()
t2 = time.time()
print("All process done with time: ",str(t2-t1))
@staticmethod
def getArticle(*args, **kwargs):
    """Download the full revision history of Wikipedia article(s) via Special:Export.

    Keyword Args:
        file_name: a single article title to download.
        file_list: list of article titles (takes precedence over file_name).
        output_dir: directory for the resulting ``<title>.xml`` files
            (default: current directory).
    """
    featuredArticleList = []
    if kwargs.get('file_name') is not None:
        featuredArticleList.append(kwargs['file_name'])
    if kwargs.get('file_list') is not None:
        featuredArticleList = kwargs['file_list']
    if kwargs.get('output_dir') is not None:
        output_dir = kwargs['output_dir'] + '/'
    else:
        output_dir = ''
    # Mobile UA: Special:Export is served without throttling to mobile clients
    # in the original code — TODO confirm this is still required.
    headers = {
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Mobile Safari/537.36'
    }
    for articleName in featuredArticleList:
        url = 'https://en.m.wikipedia.org/w/index.php?title=Special:Export&pages=' + articleName + '&history=1&action=submit'
        print('Downloading ' + articleName + '...')
        r = requests.get(url, headers=headers)
        if r.status_code == 200:
            # Fix: open the output file only after a successful download, so
            # failed requests no longer leave empty .xml files behind; the
            # 'with' block also guarantees the handle is closed on error.
            with io.open(output_dir + articleName + '.xml', mode='w+', encoding='utf-8') as file_handler:
                file_handler.write(r.text)
            print(articleName, 'Completed!')
        else:
            print('Something went wrong! ' + articleName + '\n' + '\n')
def serialCompress(self, dir_path, *args, **kwargs):
    """Convert every file in *dir_path* sequentially via ``self.convert``.

    Keyword Args:
        output_dir: destination directory (default: current working directory).

    Fix: the method was decorated ``@staticmethod`` while taking ``self`` and
    calling ``self.convert`` — instance calls (``obj.serialCompress(path)``)
    bound the path to ``self`` and crashed. The decorator is removed; explicit
    calls of the form ``wikiConverter.serialCompress(obj, path)`` behave
    exactly as before.
    """
    t1 = time.time()
    file_list = os.listdir(dir_path)
    path_list = []
    if kwargs.get('output_dir') is not None:
        output_dir = kwargs['output_dir']
    else:
        output_dir = os.getcwd()
    for f in file_list:
        path_list.append(dir_path + '/' + f)
    self.convert(path_list, output_dir=output_dir)
    t2 = time.time()
    print("all process done: ", str(t2 - t1))
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from settings import dckr
import io
import os
from itertools import chain
from threading import Thread
import netaddr
import sys
import time
import datetime
flatten = lambda l: chain.from_iterable(l)
def get_ctn_names():
    """Return the names of all Docker containers, leading '/' stripped."""
    collected = []
    for entry in flatten(ctn['Names'] for ctn in dckr.containers(all=True)):
        if entry[0] == '/':
            collected.append(entry[1:])
        else:
            collected.append(entry)
    return collected
def ctn_exists(name):
    """True if a Docker container named *name* currently exists."""
    existing = get_ctn_names()
    return name in existing
def img_exists(name):
    """True if a Docker image whose first repo tag matches *name* exists locally.

    Untagged (dangling) images report ``RepoTags`` as None and are skipped.
    """
    # 'is not None' replaces the non-idiomatic '!= None' comparison (PEP 8).
    return name in [ctn['RepoTags'][0].split(':')[0]
                    for ctn in dckr.images() if ctn['RepoTags'] is not None]
def rm_line():
    """Erase the previous terminal line (ANSI: up, clear line, left, up)."""
    sys.stdout.write('\x1b[1A\x1b[2K\x1b[1D\x1b[1A' + '\n')
class Container(object):
    """Thin wrapper around a Docker container used by the benchmark harness.

    ``host_dir`` on the host is bind-mounted at ``guest_dir`` inside the
    container. Subclasses supply a ``dockerfile`` class attribute plus the
    startup/version command hooks below.
    """

    def __init__(self, name, image, host_dir, guest_dir, conf):
        self.name = name
        self.image = image
        self.host_dir = host_dir
        self.guest_dir = guest_dir
        # conf: this container's scenario configuration dict.
        self.conf = conf
        self.config_name = None
        # Polled by the daemon threads started in stats()/neighbor_stats().
        self.stop_monitoring = False
        if not os.path.exists(host_dir):
            os.makedirs(host_dir)
            # World-writable so processes inside the container can write here.
            os.chmod(host_dir, 0o777)

    @classmethod
    def build_image(cls, force, tag, nocache=False):
        """Build cls.dockerfile into an image tagged *tag* unless it already exists.

        Host http(s)_proxy settings are injected right after the last FROM line.
        """
        def insert_after_from(dockerfile, line):
            # Insert *line* after the LAST FROM statement in the Dockerfile.
            lines = dockerfile.split('\n')
            i = -1
            for idx, l in enumerate(lines):
                elems = [e.strip() for e in l.split()]
                if len(elems) > 0 and elems[0] == 'FROM':
                    i = idx
            if i < 0:
                raise Exception('no FROM statement')
            lines.insert(i+1, line)
            return '\n'.join(lines)
        for env in ['http_proxy', 'https_proxy']:
            if env in os.environ:
                cls.dockerfile = insert_after_from(cls.dockerfile, 'ENV {0} {1}'.format(env, os.environ[env]))
        f = io.BytesIO(cls.dockerfile.encode('utf-8'))
        if force or not img_exists(tag):
            print('build {0}...'.format(tag))
            # Stream build output line by line.
            for line in dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache):
                if 'stream' in line:
                    print(line['stream'].strip())

    def get_ipv4_addresses(self):
        """Return the container's IPv4 addresses; subclasses may override."""
        if 'local-address' in self.conf:
            local_addr = self.conf['local-address']
            return [local_addr]
        raise NotImplementedError()

    def get_host_config(self):
        """Host config: privileged bridge-mode container with host_dir bind-mounted."""
        host_config = dckr.create_host_config(
            binds=['{0}:{1}'.format(os.path.abspath(self.host_dir), self.guest_dir)],
            privileged=True,
            network_mode='bridge',
            cap_add=['NET_ADMIN']
        )
        return host_config

    def run(self, dckr_net_name='', rm=True):
        """Create and start the container, attach it to *dckr_net_name*.

        Validates the container's IPv4 addresses against the network's IPAM
        subnets and exits the program if they don't fit. Extra addresses
        beyond the first are added manually with ``ip addr add``.
        """
        if rm and ctn_exists(self.name):
            print('remove container:', self.name)
            dckr.remove_container(self.name, force=True)
        host_config = self.get_host_config()
        ctn = dckr.create_container(image=self.image, entrypoint='bash', detach=True, name=self.name,
                                    stdin_open=True, volumes=[self.guest_dir], host_config=host_config)
        self.ctn_id = ctn['Id']
        ipv4_addresses = self.get_ipv4_addresses()
        net_id = None
        for network in dckr.networks(names=[dckr_net_name]):
            if network['Name'] != dckr_net_name:
                continue
            net_id = network['Id']
            if not 'IPAM' in network:
                print(('can\'t verify if container\'s IP addresses '
                       'are valid for Docker network {}: missing IPAM'.format(dckr_net_name)))
                break
            ipam = network['IPAM']
            if not 'Config' in ipam:
                print(('can\'t verify if container\'s IP addresses '
                       'are valid for Docker network {}: missing IPAM.Config'.format(dckr_net_name)))
                break
            ip_ok = False
            network_subnets = [item['Subnet'] for item in ipam['Config'] if 'Subnet' in item]
            for ip in ipv4_addresses:
                # NOTE(review): ip_ok reflects only the LAST subnet tested for
                # each ip (no break on match) — confirm against upstream.
                for subnet in network_subnets:
                    ip_ok = netaddr.IPAddress(ip) in netaddr.IPNetwork(subnet)
                if not ip_ok:
                    print(('the container\'s IP address {} is not valid for Docker network {} '
                           'since it\'s not part of any of its subnets ({})'.format(
                               ip, dckr_net_name, ', '.join(network_subnets))))
                    print(('Please consider removing the Docket network {net} '
                           'to allow bgperf to create it again using the '
                           'expected subnet:\n'
                           ' docker network rm {net}'.format(net=dckr_net_name)))
                    sys.exit(1)
            break
        if net_id is None:
            print('Docker network "{}" not found!'.format(dckr_net_name))
            return
        dckr.connect_container_to_network(self.ctn_id, net_id, ipv4_address=ipv4_addresses[0])
        dckr.start(container=self.name)
        if len(ipv4_addresses) > 1:
            # get the interface used by the first IP address already added by Docker
            dev = None
            pxlen = None
            res = self.local('ip addr').decode("utf-8")
            for line in res.split('\n'):
                if ipv4_addresses[0] in line:
                    dev = line.split(' ')[-1].strip()
                    pxlen = line.split('/')[1].split(' ')[0].strip()
            if not dev:
                # Fallback when parsing 'ip addr' failed.
                dev = "eth0"
                pxlen = 8
            for ip in ipv4_addresses[1:]:
                self.local(f'ip addr add {ip}/{pxlen} dev {dev}')
        return ctn

    def stats(self, queue):
        """Start a daemon thread pushing CPU%/memory samples for this container to *queue*."""
        def stats():
            if self.stop_monitoring:
                return
            for stat in dckr.stats(self.ctn_id, decode=True):
                if self.stop_monitoring:
                    return
                cpu_percentage = 0.0
                prev_cpu = stat['precpu_stats']['cpu_usage']['total_usage']
                if 'system_cpu_usage' in stat['precpu_stats']:
                    prev_system = stat['precpu_stats']['system_cpu_usage']
                else:
                    prev_system = 0
                cpu = stat['cpu_stats']['cpu_usage']['total_usage']
                system = stat['cpu_stats']['system_cpu_usage'] if 'system_cpu_usage' in stat['cpu_stats'] else 0
                if not 'percpu_usage' in stat['cpu_stats']['cpu_usage']:
                    continue
                cpu_num = len(stat['cpu_stats']['cpu_usage']['percpu_usage'])
                cpu_delta = float(cpu) - float(prev_cpu)
                system_delta = float(system) - float(prev_system)
                if system_delta > 0.0 and cpu_delta > 0.0:
                    # Docker's standard CPU% formula: container delta over
                    # system delta, scaled by core count.
                    cpu_percentage = (cpu_delta / system_delta) * float(cpu_num) * 100.0
                mem_usage = stat['memory_stats'].get('usage', 0)
                queue.put({'who': self.name, 'cpu': cpu_percentage, 'mem': mem_usage, 'time': datetime.datetime.now()})
        t = Thread(target=stats)
        t.daemon = True
        t.start()

    def neighbor_stats(self, queue):
        """Start a daemon thread reporting, once per second, which neighbors are checked."""
        def stats():
            while True:
                if self.stop_monitoring:
                    return
                neighbors_checked = self.get_neighbor_received_routes()
                queue.put({'who': self.name, 'neighbors_checked': neighbors_checked})
                time.sleep(1)
        t = Thread(target=stats)
        t.daemon = True
        t.start()

    def local(self, cmd, stream=False, detach=False, stderr=False):
        """Run *cmd* inside the container (docker exec) and return its output."""
        i = dckr.exec_create(container=self.name, cmd=cmd, stderr=stderr)
        return dckr.exec_start(i['Id'], stream=stream, detach=detach)

    def get_startup_cmd(self):
        # Subclasses return the shell script that starts the daemon.
        raise NotImplementedError()

    def get_version_cmd(self):
        # Subclasses return the command that prints the daemon's version.
        raise NotImplementedError()

    def exec_version_cmd(self):
        """Run the version command inside the container and return its output as str."""
        version = self.get_version_cmd()
        i = dckr.exec_create(container=self.name, cmd=version, stderr=False)
        return dckr.exec_start(i['Id'], stream=False, detach=False).decode('utf-8')

    def exec_startup_cmd(self, stream=False, detach=False):
        """Write get_startup_cmd() to start.sh in the shared dir and execute it."""
        startup_content = self.get_startup_cmd()
        if not startup_content:
            return
        filename = '{0}/start.sh'.format(self.host_dir)
        with open(filename, 'w') as f:
            f.write(startup_content)
        os.chmod(filename, 0o777)
        return self.local('{0}/start.sh'.format(self.guest_dir),
                          detach=detach,
                          stream=stream)

    def get_test_counts(self):
        '''gets the configured counts that each tester is supposed to send'''
        tester_count = {}
        neighbors_checked = {}
        for tester in self.scenario_global_conf['testers']:
            for n in tester['neighbors'].keys():
                tester_count[n] = tester['neighbors'][n]['check-points']
                neighbors_checked[n] = False
        return tester_count, neighbors_checked

    def get_neighbor_received_routes(self):
        # NOTE: if we call this before the daemon starts we will not get output.
        # get_neighbors_received() is provided by subclasses — not defined here.
        tester_count, neighbors_checked = self.get_test_counts()
        neighbors_received = self.get_neighbors_received()
        for n in neighbors_received.keys():
            # this will include the monitor, we don't want to check that
            if n in tester_count and neighbors_received[n] >= tester_count[n]:
                neighbors_checked[n] = True
        return neighbors_checked
class Target(Container):
    """The BGP implementation under test: configures itself then starts its daemon."""

    # Name of the daemon config file inside host_dir; set by subclasses.
    CONFIG_FILE_NAME = None

    def write_config(self):
        # Subclasses generate their daemon-specific configuration here.
        raise NotImplementedError()

    def use_existing_config(self):
        """Copy a user-supplied config (conf['config_path']) into place; True if used."""
        if 'config_path' in self.conf:
            with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
                with open(self.conf['config_path'], 'r') as orig:
                    f.write(orig.read())
            return True
        return False

    def run(self, scenario_global_conf, dckr_net_name=''):
        """Start the container, install configuration, and launch the daemon detached."""
        self.scenario_global_conf = scenario_global_conf
        ctn = super(Target, self).run(dckr_net_name)
        if not self.use_existing_config():
            self.write_config()
        self.exec_startup_cmd(detach=True)
        return ctn
class Tester(Container):
    """A load-generating BGP peer (e.g. ExaBGP) that drives the target."""

    # Prefix prepended to the container name; set by subclasses
    # (which also define GUEST_DIR).
    CONTAINER_NAME_PREFIX = None

    def __init__(self, name, host_dir, conf, image):
        Container.__init__(self, self.CONTAINER_NAME_PREFIX + name, image, host_dir, self.GUEST_DIR, conf)

    def get_ipv4_addresses(self):
        """One local address per configured neighbor."""
        res = []
        peers = list(self.conf.get('neighbors', {}).values())
        for p in peers:
            res.append(p['local-address'])
        return res

    def configure_neighbors(self, target_conf):
        # Subclasses generate per-neighbor tester configuration here.
        raise NotImplementedError()

    def run(self, target_conf, dckr_net_name):
        """Start the tester container and write its neighbor configuration."""
        self.ctn = super(Tester, self).run(dckr_net_name)
        self.configure_neighbors(target_conf)

    def launch(self):
        """Run the startup script and watch its output until each peer process appears.

        Counts distinct PIDs seen in the ExaBGP log to show boot progress.
        """
        output = self.exec_startup_cmd(stream=True, detach=False)
        cnt = 0
        prev_pid = 0
        for lines in output:  # This is the ExaBGP output
            lines = lines.decode("utf-8").strip().split('\n')
            for line in lines:
                fields = line.split('|')
                if len(fields) > 2:
                    # Get PID from ExaBGP output
                    try:
                        # ExaBGP Version >= 4
                        # e.g. 00:00:00 | 111 | control | command/comment
                        pid = int(fields[1])
                    except ValueError:
                        # ExaBGP Version = 3
                        # e.g. 00:00:00 | INFO | 111 | control | command
                        pid = int(fields[2])
                    if pid != prev_pid:
                        prev_pid = pid
                        cnt += 1
                        if cnt > 1:
                            # Overwrite the previous progress line in place.
                            rm_line()
                        print('tester booting.. ({0}/{1})'.format(cnt, len(list(self.conf.get('neighbors', {}).values()))))
                else:
                    print(lines)
        return None
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.4)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
def install(package):
    """pip-install *package*, then re-exec the miner so the import succeeds."""
    # Install pip package automatically
    check_call([sys.executable, "-m", "pip", "install", package])
    # Replace the current process with a fresh interpreter running the same argv.
    execl(sys.executable, sys.executable, * sys.argv)
def now():
    """Return the current local time as a datetime object."""
    return datetime.now()
# --- Dependency bootstrap: each third-party package is imported and, when
# --- missing, installed via install() (which re-executes the miner).
try:
    # Check if cpuinfo is installed
    import cpuinfo
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Cpuinfo is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"py-cpuinfo\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("py-cpuinfo")
try:
    # Check if colorama is installed
    from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Colorama is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"colorama\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("colorama")
try:
    # Check if requests is installed
    import requests
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Requests is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"requests\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("requests")
try:
    # Check if pypresence is installed
    from pypresence import Presence
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Pypresence is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"pypresence\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("pypresence")
try:
    # Check if xxhash is installed; it is optional (DUCO-S1 still works).
    import xxhash
    xxhash_enabled = True
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Xxhash is not installed. "
        + "Continuing without xxhash support.")
    xxhash_enabled = False
# Global variables
minerVersion = "2.4"  # Version number
timeout = 30  # Socket timeout
resourcesFolder = "PCMiner_" + str(minerVersion) + "_resources"
donatorrunning = False
debug = "n"
rigIdentifier = "None"
requestedDiff = "NET"
algorithm = "DUCO-S1"
serveripfile = ("https://raw.githubusercontent.com/"
                + "revoxhere/"
                + "duino-coin/gh-pages/"
                + "serverip.txt")  # Serverip file
config = ConfigParser()
donationlevel = 0
thread = []
totalhashrate_mean = []
# Create resources folder if it doesn't exist
if not path.exists(resourcesFolder):
    mkdir(resourcesFolder)
# Check if languages file exists; download translations from GitHub otherwise.
if not Path(resourcesFolder + "/langs.json").is_file():
    url = ("https://raw.githubusercontent.com/"
           + "revoxhere/"
           + "duino-coin/master/Resources/"
           + "PC_Miner_langs.json")
    r = requests.get(url)
    with open(resourcesFolder + "/langs.json", "wb") as f:
        f.write(r.content)
# Load language file (lang_file is rebound from file handle to parsed dict).
with open(resourcesFolder + "/langs.json", "r", encoding="utf8") as lang_file:
    lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
    if getlocale()[0] is None:
        setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
    if not Path(resourcesFolder + "/Miner_config.cfg").is_file():
        locale = getdefaultlocale()[0]
        if locale.startswith("es"):
            lang = "spanish"
        elif locale.startswith("pl"):
            lang = "polish"
        elif locale.startswith("fr"):
            lang = "french"
        elif locale.startswith("ru"):
            lang = "russian"
        elif locale.startswith("de"):
            lang = "german"
        elif locale.startswith("tr"):
            lang = "turkish"
        elif locale.startswith("zh"):
            lang = "chinese_simplified"
        else:
            lang = "english"
    else:
        # Read language variable from configfile
        try:
            config.read(resourcesFolder + "/Miner_config.cfg")
            lang = config["miner"]["language"]
        except Exception:
            # If it fails, fallback to english
            lang = "english"
except:
    lang = "english"
def getString(string_name):
    """Look up *string_name* in the active language, falling back to English."""
    translations = lang_file[lang]
    if string_name in translations:
        return translations[string_name]
    english = lang_file["english"]
    if string_name in english:
        return english[string_name]
    return "String not found: " + string_name
def debugOutput(text):
    """Print *text* with a timestamp, but only when debug mode is on ('y')."""
    if debug != "y":
        return
    print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
    """Set the terminal window title (Windows console or ANSI escape)."""
    if osname == "nt":
        # Windows systems
        system("title " + title)
        return
    # Most standard terminals understand the OSC 0 escape sequence.
    print("\33]0;" + title + "\a", end="")
    sys.stdout.flush()
def handler(signal_received, frame):
    """SIGINT handler: print a goodbye (main process only) and hard-exit.

    Args:
        signal_received: signal number delivered by the OS.
        frame: interrupted stack frame (unused).
    """
    # Imported locally: multiprocessing is not imported at module level
    # (only freeze_support under __main__), so referencing
    # current_process() here used to raise NameError inside the handler.
    from multiprocessing import current_process
    if current_process().name == "MainProcess":
        prettyPrint(
            "sys0",
            getString("sigint_detected")
            + Style.NORMAL
            + Fore.RESET
            + getString("goodbye"),
            "warning")
    try:
        # Close previous socket connection (if any).
        # NOTE(review): 'socket' here is the class imported from the socket
        # module, not an instance, so this call can never succeed; kept as
        # best-effort (any failure is swallowed) to preserve behavior.
        socket.close()
    except Exception:
        pass
    _exit(0)
# Enable signal handler: route Ctrl+C (SIGINT) to handler() above.
signal(SIGINT, handler)
def Greeting():
    """Print the startup banner: version, CPU, settings and a time-of-day greeting.

    Reads module-level settings (requestedDiff, algorithm, rigIdentifier,
    username, donationlevel, threadcount, cpu) and sets the global `greeting`.
    On Windows/Linux it also downloads the donation executable on first run.
    """
    # Greeting message
    global greeting
    print(Style.RESET_ALL)
    # Human-readable difficulty label.
    if requestedDiff == "LOW":
        diffName = getString("low_diff_short")
    elif requestedDiff == "MEDIUM":
        diffName = getString("medium_diff_short")
    else:
        diffName = getString("net_diff_short")
    # Pick a greeting matching the local hour.
    current_hour = strptime(ctime(time())).tm_hour
    if current_hour < 12:
        greeting = getString("greeting_morning")
    elif current_hour == 12:
        greeting = getString("greeting_noon")
    elif current_hour > 12 and current_hour < 18:
        greeting = getString("greeting_afternoon")
    elif current_hour >= 18:
        greeting = getString("greeting_evening")
    else:
        greeting = getString("greeting_back")
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Fore.YELLOW
        + Style.BRIGHT
        + getString("banner")
        + Style.RESET_ALL
        + Fore.MAGENTA
        + " (v"
        + str(minerVersion)
        + ") "
        + Fore.RESET
        + "2019-2021")
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.YELLOW
        + "https://github.com/revoxhere/duino-coin")
    try:
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + "CPU: "
            + Style.BRIGHT
            + Fore.YELLOW
            + str(threadcount)
            + "x "
            + str(cpu["brand_raw"]))
    except Exception as e:
        # cpuinfo may not expose brand_raw on every platform.
        debugOutput("Error displaying CPU message: " + str(e))
    if osname == "nt" or osname == "posix":
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + getString("donation_level")
            + Style.BRIGHT
            + Fore.YELLOW
            + str(donationlevel))
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + getString("algorithm")
        + Style.BRIGHT
        + Fore.YELLOW
        + algorithm
        + " @ "
        + diffName)
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + getString("rig_identifier")
        + Style.BRIGHT
        + Fore.YELLOW
        + rigIdentifier)
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + str(greeting)
        + ", "
        + Style.BRIGHT
        + Fore.YELLOW
        + str(username)
        + "!\n")
    if osname == "nt":
        # Initial miner executable section
        if not Path(resourcesFolder + "/Donate_executable.exe").is_file():
            debugOutput(
                "OS is Windows, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "DonateExecutableWindows.exe?raw=true")
            r = requests.get(url)
            with open(resourcesFolder + "/Donate_executable.exe", "wb") as f:
                f.write(r.content)
    elif osname == "posix":
        # Initial miner executable section
        if not Path(resourcesFolder + "/Donate_executable").is_file():
            debugOutput(
                "OS is Windows, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "DonateExecutableLinux?raw=true")
            r = requests.get(url)
            with open(resourcesFolder + "/Donate_executable", "wb") as f:
                f.write(r.content)
def loadConfig():
    """Load miner settings from Miner_config.cfg, prompting interactively on first run.

    Populates the module-level settings (username, efficiency, threadcount,
    requestedDiff, rigIdentifier, donationlevel, algorithm, debug) and writes
    the config file when it doesn't exist yet. `efficiency` ends up as a
    0.0-1.0 sleep fraction (0.0 = run flat out).
    """
    # Fix: cpu_count was referenced below but multiprocessing is never
    # imported at module level, so the first-run prompts raised NameError.
    from multiprocessing import cpu_count
    # Config loading section
    global username
    global efficiency
    global donationlevel
    global debug
    global threadcount
    global requestedDiff
    global rigIdentifier
    global lang
    global algorithm
    # Initial configuration
    if not Path(resourcesFolder + "/Miner_config.cfg").is_file():
        print(
            Style.BRIGHT
            + getString("basic_config_tool")
            + resourcesFolder
            + getString("edit_config_file_warning"))
        print(
            Style.RESET_ALL
            + getString("dont_have_account")
            + Fore.YELLOW
            + getString("wallet")
            + Fore.RESET
            + getString("register_warning"))
        username = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_username")
            + Fore.RESET
            + Style.BRIGHT)
        if xxhash_enabled:
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + Fore.RESET
                + "1"
                + Style.NORMAL
                + " - DUCO-S1")
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + Fore.RESET
                + "2"
                + Style.NORMAL
                + " - XXHASH")
            algorithm = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_algorithm")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            algorithm = "1"
        efficiency = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_intensity")
            + Fore.RESET
            + Style.BRIGHT)
        threadcount = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_threads")
            + str(cpu_count())
            + "): "
            + Fore.RESET
            + Style.BRIGHT)
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "1"
            + Style.NORMAL
            + " - "
            + getString("low_diff"))
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "2"
            + Style.NORMAL
            + " - "
            + getString("medium_diff"))
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "3"
            + Style.NORMAL
            + " - "
            + getString("net_diff"))
        requestedDiff = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_difficulty")
            + Fore.RESET
            + Style.BRIGHT)
        rigIdentifier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_rig_identifier")
            + Fore.RESET
            + Style.BRIGHT)
        if rigIdentifier == "y" or rigIdentifier == "Y":
            rigIdentifier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_rig_name")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rigIdentifier = "None"
        donationlevel = "0"
        if osname == "nt" or osname == "posix":
            donationlevel = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_donation_level")
                + Fore.RESET
                + Style.BRIGHT)
        # Sanitize efficiency: strip non-digits, clamp to [1, 100], default 95
        efficiency = sub(r"\D", "", efficiency)
        if efficiency == "":
            efficiency = 95
        elif float(efficiency) > int(100):
            efficiency = 100
        elif float(efficiency) < int(1):
            efficiency = 1
        # Sanitize threadcount: clamp to [1, 10], default to CPU count
        threadcount = sub(r"\D", "", threadcount)
        if threadcount == "":
            threadcount = cpu_count()
        elif int(threadcount) > int(10):
            threadcount = 10
        elif int(threadcount) < int(1):
            threadcount = 1
        # Map algorithm choice to its name
        if algorithm == "2":
            algorithm = "XXHASH"
        else:
            algorithm = "DUCO-S1"
        # Map difficulty choice to its name
        if requestedDiff == "1":
            requestedDiff = "LOW"
        elif requestedDiff == "2":
            requestedDiff = "MEDIUM"
        else:
            requestedDiff = "NET"
        # Sanitize donation level: clamp to [0, 5], default 1
        donationlevel = sub(r"\D", "", donationlevel)
        if donationlevel == "":
            donationlevel = 1
        elif float(donationlevel) > int(5):
            donationlevel = 5
        elif float(donationlevel) < int(0):
            donationlevel = 0
        # Format data
        config["miner"] = {
            "username": username,
            "efficiency": efficiency,
            "threads": threadcount,
            "requestedDiff": requestedDiff,
            "donate": donationlevel,
            "identifier": rigIdentifier,
            "algorithm": algorithm,
            "language": lang,
            "debug": "n"}
        # Write data to configfile
        with open(resourcesFolder + "/Miner_config.cfg", "w") as configfile:
            config.write(configfile)
        # Calculate efficiency for later use with the sleep function
        efficiency = (100 - float(efficiency)) * 0.01
        print(Style.RESET_ALL + getString("config_saved"))
    else:
        # If config already exists, load data from it
        config.read(resourcesFolder + "/Miner_config.cfg")
        username = config["miner"]["username"]
        efficiency = config["miner"]["efficiency"]
        threadcount = config["miner"]["threads"]
        requestedDiff = config["miner"]["requestedDiff"]
        donationlevel = config["miner"]["donate"]
        algorithm = config["miner"]["algorithm"]
        rigIdentifier = config["miner"]["identifier"]
        debug = config["miner"]["debug"]
        # Calculate efficiency for use with the sleep function
        efficiency = (100 - float(efficiency)) * 0.01
def Donate():
    """Launch the developer-donation miner in the background once, or nag when donation is 0."""
    global donationlevel
    global donatorrunning
    global donateExecutable
    # Build the platform-specific shell command for the donation executable.
    if osname == "nt":
        cmd = (
            "cd "
            + resourcesFolder
            + "& Donate_executable.exe "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    elif osname == "posix":
        cmd = (
            "cd "
            + resourcesFolder
            + "&& chmod +x Donate_executable "
            + "&& ./Donate_executable "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    if int(donationlevel) <= 0:
        prettyPrint(
            "sys0",
            Fore.YELLOW
            + getString("free_network_warning")
            + getString("donate_warning")
            + Fore.GREEN
            + "https://duinocoin.com/donate"
            + Fore.YELLOW
            + getString("learn_more_donate"),
            "warning")
        sleep(10)
    elif donatorrunning == False:
        # Map donation level to the percentage argument of the executable.
        if int(donationlevel) == 5:
            cmd += "95"
        elif int(donationlevel) == 4:
            cmd += "75"
        elif int(donationlevel) == 3:
            cmd += "50"
        elif int(donationlevel) == 2:
            cmd += "20"
        elif int(donationlevel) == 1:
            cmd += "10"
        if int(donationlevel) > 0:
            debugOutput(getString("starting_donation"))
            donatorrunning = True
            # Launch CMD as subprocess (stderr suppressed).
            donateExecutable = Popen(
                cmd, shell=True, stderr=DEVNULL)
            prettyPrint(
                "sys0",
                getString("thanks_donation"),
                "warning")
def ducos1(
        lastBlockHash,
        expectedHash,
        difficulty):
    """DUCO-S1: brute-force the sha1 nonce for a job.

    Tries every nonce in [0, 100*difficulty]; on a match returns
    [nonce, hashrate_in_H/s], otherwise falls through (returns None).
    """
    started = time()
    # Hash the block-hash prefix once and reuse copies per nonce.
    prefix = sha1(str(lastBlockHash).encode('ascii'))
    for nonce in range(100 * int(difficulty) + 1):
        attempt = prefix.copy()
        attempt.update(str(nonce).encode('ascii'))
        if attempt.hexdigest() == expectedHash:
            elapsed = time() - started
            return [nonce, nonce / elapsed]
def ducos1xxh(
        lastBlockHash,
        expectedHash,
        difficulty):
    """XXHASH variant of the solver: brute-force the xxh64 nonce.

    Tries every nonce in [0, 100*difficulty]; on a match returns
    [nonce, hashrate_in_H/s], otherwise falls through (returns None).
    """
    started = time()
    for nonce in range(100 * int(difficulty) + 1):
        digest = xxhash.xxh64(
            str(lastBlockHash) + str(nonce), seed=2811).hexdigest()
        if digest == expectedHash:
            elapsed = time() - started
            return [nonce, nonce / elapsed]
def Thread(
        threadid,
        accepted,
        rejected,
        requestedDiff,
        khashcount,
        username,
        efficiency,
        rigIdentifier,
        algorithm,
        hashrates_list,
        totalhashrate_mean):
    """Mining worker loop for one CPU thread (run as a separate process).

    Repeatedly: resolves the pool address from GitHub, connects, requests a
    job, solves it with DUCO-S1 or XXHASH, submits the result and prints the
    accepted/block/rejected feedback. Never returns; on errors it sleeps and
    reconnects.

    Args:
        threadid: ordinal of this worker, used in log tags.
        accepted, rejected: shared counters (.value is read/incremented).
        requestedDiff: "LOW"/"MEDIUM"/"NET" difficulty to request.
        khashcount: unused here — kept for interface compatibility.
        username: Duino-Coin account credited with shares.
        efficiency: 0.0-1.0 sleep fraction (0.0 = full speed).
        rigIdentifier: rig name reported with each share.
        algorithm: "DUCO-S1" or "XXHASH".
        hashrates_list: shared mapping threadid -> kH/s of that thread.
        totalhashrate_mean: shared list of recent total-hashrate samples.
    """
    # Mining section for every thread
    while True:
        # Grab server IP and port
        while True:
            try:
                # Use request to grab data from raw github file
                res = requests.get(serveripfile, data=None)
                if res.status_code == 200:
                    # Read content and split into lines
                    content = (res.content.decode().splitlines())
                    # Line 1 = IP
                    masterServer_address = content[0]
                    # Line 2 = port
                    masterServer_port = content[1]
                    debugOutput(
                        "Retrieved pool IP: "
                        + masterServer_address
                        + ":"
                        + str(masterServer_port))
                    break
            except Exception as e:
                # If there was an error with grabbing data from GitHub
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("data_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (git err: "
                    + str(e)
                    + ")",
                    "error")
                debugOutput("GitHub error: " + str(e))
                sleep(10)
        # Connect to the server
        while True:
            try:
                soc = socket()
                # Establish socket connection to the server
                soc.connect((str(masterServer_address),
                             int(masterServer_port)))
                soc.settimeout(timeout)
                serverVersion = soc.recv(3).decode().rstrip(
                    "\n")  # Get server version
                debugOutput("Server version: " + serverVersion)
                if (float(serverVersion) <= float(minerVersion)
                        and len(serverVersion) == 3):
                    # If miner is up-to-date, display a message and continue
                    prettyPrint(
                        "net"
                        + str(threadid),
                        getString("connected")
                        + Fore.RESET
                        + Style.NORMAL
                        + getString("connected_server")
                        + str(serverVersion)
                        + ")",
                        "success")
                    break
                else:
                    # Miner is outdated; mining continues anyway.
                    prettyPrint(
                        "sys"
                        + str(threadid),
                        getString("outdated_miner")
                        + minerVersion
                        + ") -"
                        + getString("server_is_on_version")
                        + serverVersion
                        + Style.NORMAL
                        + Fore.RESET
                        + getString("update_warning"),
                        "warning")
                    break
            except Exception as e:
                # Socket connection error
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("connecting_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (net err: "
                    + str(e)
                    + ")",
                    "error")
                debugOutput("Connection error: " + str(e))
                sleep(10)
        if algorithm == "XXHASH":
            using_algo = getString("using_algo_xxh")
        else:
            using_algo = getString("using_algo")
        prettyPrint(
            "sys"
            + str(threadid),
            getString("mining_thread")
            + str(threadid)
            + getString("mining_thread_starting")
            + Style.NORMAL
            + Fore.RESET
            + using_algo
            + Fore.YELLOW
            + str(int(100 - efficiency * 100))
            + "% "
            + getString("efficiency"),
            "success")
        # Mining section
        while True:
            try:
                # If efficiency lower than 100...
                if float(100 - efficiency * 100) < 100:
                    # ...sleep some time
                    sleep(float(efficiency * 5))
                while True:
                    # Ask the server for job
                    if algorithm == "XXHASH":
                        soc.send(bytes(
                            "JOBXX,"
                            + str(username)
                            + ","
                            + str(requestedDiff),
                            encoding="utf8"))
                    else:
                        soc.send(bytes(
                            "JOB,"
                            + str(username)
                            + ","
                            + str(requestedDiff),
                            encoding="utf8"))
                    job = soc.recv(128).decode().rstrip("\n")
                    job = job.split(",")  # Get work from pool
                    debugOutput("Received: " + str(job))
                    if job[1] == "This user doesn't exist":
                        prettyPrint(
                            "cpu"
                            + str(threadid),
                            getString("mining_user")
                            + str(username)
                            + getString("mining_not_exist")
                            + Style.NORMAL
                            + Fore.RESET
                            + getString("mining_not_exist_warning"),
                            "error")
                        sleep(10)
                    elif job[0] and job[1] and job[2]:
                        diff = int(job[2])
                        debugOutput(str(threadid) +
                                    "Job received: "
                                    + str(job))
                        # If job received, continue to hashing algo
                        break
                while True:
                    # Call DUCOS-1 hasher
                    computetimeStart = time()
                    if algorithm == "XXHASH":
                        algo_back_color = Back.CYAN
                        result = ducos1xxh(job[0], job[1], diff)
                    else:
                        algo_back_color = Back.YELLOW
                        result = ducos1(job[0], job[1], diff)
                    computetimeStop = time()
                    # Measure compute time
                    computetime = computetimeStop - computetimeStart
                    # NOTE(review): the original comment claimed a conversion
                    # to milliseconds here, but none happens — the value
                    # stays in seconds (and is printed with an "s" suffix).
                    computetime = computetime
                    # Read result from ducos1 hasher
                    ducos1res = result[0]
                    debugOutput("Thread "
                                + str(threadid)
                                + ": result found: "
                                + str(ducos1res))
                    # Convert H/s to kH/s
                    threadhashcount = int(result[1] / 1000)
                    # Add this thread's hash counter
                    # to the global hashrate counter
                    hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
                    sharehashrate = 0
                    for thread in hashrates_list.keys():
                        sharehashrate += hashrates_list[thread]
                    totalhashrate_mean.append(sharehashrate)
                    # Get average from the last 20 hashrate measurements
                    totalhashrate = mean(totalhashrate_mean[-20:])
                    while True:
                        # Send result of hashing algorithm to the server
                        soc.send(bytes(
                            str(ducos1res)
                            + ","
                            + str(threadhashcount * 1000)
                            + ","
                            + "Official PC Miner ("
                            + str(algorithm)
                            + ") v"
                            + str(minerVersion)
                            + ","
                            + str(rigIdentifier),
                            encoding="utf8"))
                        responsetimetart = now()
                        # Get feedback
                        feedback = soc.recv(8).decode().rstrip("\n")
                        responsetimestop = now()
                        # Measure server ping
                        ping = str(int(
                            (responsetimestop - responsetimetart).microseconds
                            / 1000))
                        debugOutput("Thread "
                                    + str(threadid)
                                    + ": Feedback received: "
                                    + str(feedback)
                                    + " Ping: "
                                    + str(ping))
                        if totalhashrate > 800:
                            # Format hashcount to MH/s
                            formattedhashcount = str(
                                "%03.2f" % round(totalhashrate / 1000, 2)
                                + " MH/s")
                        else:
                            # Stay with kH/s
                            formattedhashcount = str(
                                "%03.0f" % float(totalhashrate)
                                + " kH/s")
                        if feedback == "GOOD":
                            # If result was correct
                            accepted.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(minerVersion)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            print(
                                Style.RESET_ALL
                                + Fore.WHITE
                                + now().strftime(Style.DIM + "%H:%M:%S ")
                                + Style.BRIGHT
                                + algo_back_color
                                + Fore.RESET
                                + " cpu"
                                + str(threadid)
                                + " "
                                + Back.RESET
                                + Fore.GREEN
                                + " ✓"
                                + getString("accepted")
                                + Fore.RESET
                                + str(int(accepted.value))
                                + "/"
                                + str(int(accepted.value + rejected.value))
                                + Fore.YELLOW
                                + " ("
                                + str(int(
                                    (accepted.value
                                     / (accepted.value + rejected.value)
                                     * 100)))
                                + "%)"
                                + Style.NORMAL
                                + Fore.RESET
                                + " ∙ "
                                + str("%05.2f" % float(computetime))
                                + "s"
                                + Style.NORMAL
                                + " ∙ "
                                + Fore.BLUE
                                + Style.BRIGHT
                                + str(formattedhashcount)
                                + Fore.RESET
                                + Style.NORMAL
                                + " @ diff "
                                + str(diff)
                                + " ∙ "
                                + Fore.CYAN
                                + "ping "
                                + str("%02.0f" % int(ping))
                                + "ms")
                        elif feedback == "BLOCK":
                            # If block was found
                            accepted.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(minerVersion)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            print(
                                Style.RESET_ALL
                                + Fore.WHITE
                                + now().strftime(Style.DIM + "%H:%M:%S ")
                                + Style.BRIGHT
                                + algo_back_color
                                + Fore.RESET
                                + " cpu"
                                + str(threadid)
                                + " "
                                + Back.RESET
                                + Fore.CYAN
                                + " ✓"
                                + getString("block_found")
                                + Fore.RESET
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + Fore.YELLOW
                                + " ("
                                + str(int(
                                    (accepted.value
                                     / (accepted.value + rejected.value)
                                     * 100)))
                                + "%)"
                                + Style.NORMAL
                                + Fore.RESET
                                + " ∙ "
                                + str("%05.2f" % float(computetime))
                                + "s"
                                + Style.NORMAL
                                + " ∙ "
                                + Fore.BLUE
                                + Style.BRIGHT
                                + str(formattedhashcount)
                                + Fore.RESET
                                + Style.NORMAL
                                + " @ diff "
                                + str(diff)
                                + " ∙ "
                                + Fore.CYAN
                                + "ping "
                                + str("%02.0f" % int(ping))
                                + "ms")
                        else:
                            # If result was incorrect
                            rejected.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(minerVersion)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            print(
                                Style.RESET_ALL
                                + Fore.WHITE
                                + now().strftime(Style.DIM + "%H:%M:%S ")
                                + algo_back_color
                                + Back.YELLOW
                                + Fore.RESET
                                + " cpu"
                                + str(threadid)
                                + " "
                                + Style.BRIGHT
                                + Back.RESET
                                + Fore.RED
                                + " ✗"
                                + getString("rejected")
                                + Fore.RESET
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + Fore.YELLOW
                                + " ("
                                + str(int(
                                    (accepted.value
                                     / (accepted.value + rejected.value)
                                     * 100)))
                                + "%)"
                                + Style.NORMAL
                                + Fore.RESET
                                + " ∙ "
                                + str("%05.2f" % float(computetime))
                                + "s"
                                + Style.NORMAL
                                + " ∙ "
                                + Fore.BLUE
                                + Style.BRIGHT
                                + str(formattedhashcount)
                                + Fore.RESET
                                + Style.NORMAL
                                + " @ diff "
                                + str(diff)
                                + " ∙ "
                                + Fore.CYAN
                                + "ping "
                                + str("%02.0f" % int(ping))
                                + "ms")
                        break
                    break
            except Exception as e:
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("error_while_mining")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (mining err: "
                    + str(e)
                    + ")",
                    "error")
                debugOutput("Error while mining: " + str(e))
                sleep(5)
                # Break out of the mining loop to reconnect from scratch.
                break
def prettyPrint(messageType, message, state):
    """Print a message in the DUCO "standard" colored format.

    messageType: badge tag such as "net0", "cpu1" or "sys0"; its prefix
        selects the badge background color.
    message: text printed after the badge.
    state: "success", "warning" or anything else (treated as error);
        selects the text color.
    """
    # Badge background depends on the message source prefix.
    # Bug fix: the original used `if net / elif cpu / if sys` with no
    # default, so an unknown prefix raised NameError on `background`.
    if messageType.startswith("net"):
        background = Back.BLUE
    elif messageType.startswith("cpu"):
        background = Back.YELLOW
    elif messageType.startswith("sys"):
        background = Back.GREEN
    else:
        # Fallback for unrecognized prefixes.
        background = Back.GREEN
    # Text color reflects message severity.
    if state == "success":
        color = Fore.GREEN
    elif state == "warning":
        color = Fore.YELLOW
    else:
        color = Fore.RED
    print(Style.RESET_ALL
          + Fore.WHITE
          + now().strftime(Style.DIM + "%H:%M:%S ")
          + Style.BRIGHT
          + background
          + " "
          + messageType
          + " "
          + Back.RESET
          + color
          + Style.BRIGHT
          + message
          + Style.NORMAL
          + Fore.RESET)
def initRichPresence():
    """Initialize the Discord rich presence connection.

    Stores the pypresence client in the module-global RPC so that
    updateRichPresence() can reuse it.  Failures (e.g. Discord not
    running) are logged via debugOutput and otherwise ignored.
    """
    global RPC
    try:
        RPC = Presence(808045598447632384)
        RPC.connect()
        debugOutput("Discord rich presence initialized")
    except Exception as e:
        # Discord is not launched or the IPC connection failed.
        # (Also fixes the "thead" typo in the original debug message.)
        debugOutput("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
    """Periodically push miner statistics to Discord rich presence.

    Runs forever in its own thread.  Reads the shared share counters
    (accepted/rejected), the rolling hashrate samples in
    totalhashrate_mean, and the global RPC client created by
    initRichPresence().
    """
    startTime = int(time())
    while True:
        try:
            # Average of the last 20 hashrate samples, then attach a
            # unit prefix (values above 800 are shown as MH/s).
            totalhashrate = mean(totalhashrate_mean[-20:])
            if totalhashrate > 800:
                totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
            else:
                totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
            RPC.update(
                details="Hashrate: " + str(totalhashrate),
                start=startTime,
                state="Acc. shares: "
                      + str(accepted.value)
                      + "/"
                      + str(rejected.value + accepted.value),
                large_image="ducol",
                large_text="Duino-Coin, "
                           + "a coin that can be mined with almost everything, "
                           + "including AVR boards",
                buttons=[
                    {"label": "Learn more",
                     "url": "https://duinocoin.com"},
                    {"label": "Discord Server",
                     "url": "https://discord.gg/k48Ht5y"}])
            debugOutput("Rich presence updated")
        except Exception as e:
            # Discord not launched or connection dropped; keep retrying.
            # NOTE(review): "thead" typo in the message below left as-is
            # (runtime string).
            debugOutput("Error launching Discord RPC thead: " + str(e))
        sleep(15)  # 15 seconds to respect Discord rate limit
if __name__ == "__main__":
    from multiprocessing import freeze_support
    # Multiprocessing fix for pyinstaller-frozen Windows executables.
    freeze_support()
    # Processor info
    cpu = cpuinfo.get_cpu_info()
    # Colorama terminal-color initialization
    init(autoreset=True)
    title(getString("duco_python_miner") + str(minerVersion) + ")")
    try:
        from multiprocessing import Manager, Process, Value, cpu_count, current_process
        manager = Manager()
        # Shared state visible to every mining process.
        khashcount = Value("i", 0)
        accepted = Value("i", 0)  # total accepted shares
        rejected = Value("i", 0)  # total rejected shares
        hashrates_list = manager.dict()
        totalhashrate_mean = manager.list()
    except Exception as e:
        print(e)
        prettyPrint(
            "sys0",
            " Multiprocessing is not available. "
            + "Please check permissions and/or your python installation. "
            + "Exiting in 15s.",
            "error")
        sleep(15)
        _exit(1)
    try:
        # Load config file or create a new one
        loadConfig()
        debugOutput("Config file loaded")
    except Exception as e:
        prettyPrint(
            "sys0",
            getString("load_config_error")
            + resourcesFolder
            + getString("load_config_error_warning")
            + Style.NORMAL
            + Fore.RESET
            + " (config load err: "
            + str(e)
            + ")",
            "error")
        debugOutput("Error reading configfile: " + str(e))
        sleep(10)
        _exit(1)
    try:
        # Display greeting message
        Greeting()
        debugOutput("Greeting displayed")
    except Exception as e:
        prettyPrint(
            "sys0",
            "Error displaying greeting message"
            + Style.NORMAL
            + Fore.RESET
            + " (greeting err: "
            + str(e)
            + ")",
            "error")
        debugOutput("Error displaying greeting message: " + str(e))
    try:
        # Start donation thread
        Donate()
    except Exception as e:
        debugOutput("Error launching donation thread: " + str(e))
    try:
        # Launch one mining process per configured thread.
        for x in range(int(threadcount)):
            thread.append(x)
            thread[x] = Process(
                target=Thread,
                args=(
                    x,
                    accepted,
                    rejected,
                    requestedDiff,
                    khashcount,
                    username,
                    efficiency,
                    rigIdentifier,
                    algorithm,
                    hashrates_list,
                    totalhashrate_mean))
            thread[x].start()
            # 0.1 s stagger between process launches.
            sleep(0.1)
    except Exception as e:
        prettyPrint(
            "sys0",
            "Error launching CPU thread(s)"
            + Style.NORMAL
            + Fore.RESET
            + " (cpu launch err: "
            + str(e)
            + ")",
            "error")
        debugOutput("Error launching CPU thead(s): " + str(e))
    try:
        # Discord rich presence: connect, then update in a daemon-less thread.
        initRichPresence()
        thrThread(
            target=updateRichPresence).start()
    except Exception as e:
        debugOutput("Error launching Discord RPC thead: " + str(e))
|
ttt_client_gui.py | #! /usr/bin/python3
# Import the GUI library Tkinter
import tkinter
# Import the messagebox module explicitly
from tkinter import messagebox
# Import the webbroswer module for opening a link
import webbrowser
# Import the client module
from ttt_client import TTTClientGame
# Import multi-threading module
import threading
# Import socket
import socket
# Constants
# Window geometry (pixels).
C_WINDOW_WIDTH = 640
C_WINDOW_HEIGHT = 480
C_WINDOW_MIN_WIDTH = 480
C_WINDOW_MIN_HEIGHT = 360
# Shared blue color palette used by every scene and widget.
C_COLOR_BLUE_LIGHT = "#e4f1fe"
C_COLOR_BLUE_DARK = "#304e62"
C_COLOR_BLUE = "#a8d4f2"
class CanvasWidget:
    """(Abstract) Base class for all widgets drawn on a Tkinter canvas.

    Each widget owns a unique tag so its canvas items can be configured,
    bound and deleted as a group.
    """

    __count = 0  # Number of widgets created so far; builds unique ids.

    def __init__(self, canvas):
        """Initializes the widget on the given canvas."""
        self.canvas = canvas
        # Generate a unique id for each widget (used in canvas tags)
        self.id = str(CanvasWidget.__count)
        CanvasWidget.__count = CanvasWidget.__count + 1
        # Unique tag grouping every canvas item belonging to this widget
        self.tag_name = self.__class__.__name__ + self.id
        # Initialize instance variables
        self.__disabled__ = False
        # Default colors; subclasses usually override these
        self.normal_color = C_COLOR_BLUE
        self.hovered_color = C_COLOR_BLUE_DARK

    def set_clickable(self, clickable):
        """Sets if the widget can be clicked."""
        if clickable:
            self.canvas.tag_bind(self.tag_name, "<Button-1>",
                self.__on_click__)
        else:
            self.canvas.tag_unbind(self.tag_name, "<Button-1>")

    def __on_click__(self, event):
        """(Private) Called when the user clicks on the widget.

        Returns False when the widget is disabled, True after invoking
        the attached command; raises AttributeError when no command has
        been attached.
        """
        if self.__disabled__:
            return False
        # Bug fix: the original read `self.command` directly, so a widget
        # whose command was never assigned raised a bare AttributeError
        # before the explicit error message below could run.
        command = getattr(self, "command", None)
        if command is not None:
            command()
            return True
        print("Error: " + self.__class__.__name__ + " " +
            self.id + " does not have a command")
        raise AttributeError

    def set_hoverable(self, hoverable):
        """Sets if the widget can be hovered."""
        if hoverable:
            self.canvas.tag_bind(self.tag_name, "<Enter>",
                self.__on_enter__)
            self.canvas.tag_bind(self.tag_name, "<Leave>",
                self.__on_leave__)
        else:
            self.canvas.tag_unbind(self.tag_name, "<Enter>")
            self.canvas.tag_unbind(self.tag_name, "<Leave>")

    def __on_enter__(self, event):
        """(Private) Called when the mouse enters the widget."""
        if self.__disabled__:
            return False
        self.canvas.itemconfig(self.tag_name, fill=self.hovered_color)
        return True

    def __on_leave__(self, event):
        """(Private) Called when the mouse leaves the widget."""
        if self.__disabled__:
            return False
        self.canvas.itemconfig(self.tag_name, fill=self.normal_color)
        return True

    def disable(self):
        """Disables the widget so it won't respond to any events."""
        self.__disabled__ = True

    def enable(self):
        """Enables the widget so it starts to respond to events."""
        self.__disabled__ = False

    def is_enabled(self):
        """Returns True if the widget is enabled.

        Bug fix: the original returned the *disabled* flag (True when
        disabled), contradicting the method's name.
        """
        return not self.__disabled__

    def config(self, **kwargs):
        """Configures the widget's canvas items' options."""
        return self.canvas.itemconfig(self.tag_name, **kwargs)

    def delete(self):
        """Removes all of this widget's items from the canvas."""
        self.canvas.delete(self.tag_name)
class CanvasClickableLabel(CanvasWidget):
    """An underlined text label that highlights on hover and responds
    to user clicks."""

    def __init__(self, canvas, x, y, label_text, normal_color,
        hovered_color):
        """Creates the clickable label at (x, y) on the given canvas."""
        CanvasWidget.__init__(self, canvas)
        # Remember the color scheme used for the hover transitions.
        self.normal_color = normal_color
        self.hovered_color = hovered_color
        # Draw the underlined label text itself.
        canvas.create_text(x, y, font="Helvetica 14 underline",
            text=label_text, fill=self.normal_color, tags=(self.tag_name))
        # Hook up hover and click handling.
        self.set_hoverable(True)
        self.set_clickable(True)
class CanvasButton(CanvasWidget):
    """A rounded push button drawn on the canvas that responds to
    mouse clicks."""

    # Fixed button geometry in pixels.
    WIDTH = 196
    HEIGHT = 32

    def __init__(self, canvas, x, y, button_text, normal_color,
        hovered_color, normal_text_color, hovered_text_color):
        """Creates the button centered at (x, y)."""
        CanvasWidget.__init__(self, canvas)
        # Color scheme for the normal and hovered states.
        self.normal_color = normal_color
        self.hovered_color = hovered_color
        self.normal_text_color = normal_text_color
        self.hovered_text_color = hovered_text_color
        half_w = self.WIDTH / 2
        half_h = self.HEIGHT / 2
        # Rectangle forming the button's middle section.
        canvas.create_rectangle(x - half_w + half_h, y - half_h,
            x + half_w - half_h, y + half_h,
            fill=self.normal_color, outline="",
            tags=(self.tag_name, "rect" + self.id))
        # Two circles give the left and right edges a rounded look.
        canvas.create_oval(x - half_w, y - half_h,
            x - half_w + self.HEIGHT, y + half_h,
            fill=self.normal_color, outline="",
            tags=(self.tag_name, "oval_l" + self.id))
        canvas.create_oval(x + half_w - self.HEIGHT, y - half_h,
            x + half_w, y + half_h,
            fill=self.normal_color, outline="",
            tags=(self.tag_name, "oval_r" + self.id))
        # Button caption.
        canvas.create_text(x, y, font="Helvetica 16 bold",
            text=button_text, fill=self.normal_text_color,
            tags=(self.tag_name, "text" + self.id))
        # Hook up hover and click handling.
        self.set_hoverable(True)
        self.set_clickable(True)

    def __on_enter__(self, event):
        """(Override) Also recolors the caption on mouse enter."""
        if super().__on_enter__(event):
            self.canvas.itemconfig("text" + self.id,
                fill=self.hovered_text_color)

    def __on_leave__(self, event):
        """(Override) Restores the caption color on mouse leave."""
        if super().__on_leave__(event):
            self.canvas.itemconfig("text" + self.id,
                fill=self.normal_text_color)
class CanvasSquare(CanvasWidget):
    """A clickable square cell used for the grid board."""

    def __init__(self, canvas, x, y, width, normal_color, hovered_color,
        disabled_color):
        """Creates the square centered at (x, y) with the given width."""
        CanvasWidget.__init__(self, canvas)
        # Color scheme, including the extra disabled state.
        self.normal_color = normal_color
        self.hovered_color = hovered_color
        self.disabled_color = disabled_color
        half = width / 2
        # Background rectangle filling the whole cell.
        canvas.create_rectangle(x - half, y - half, x + half, y + half,
            fill=self.normal_color, outline="",
            tags=(self.tag_name, "oval" + self.id))
        # Hook up hover and click handling.
        self.set_hoverable(True)
        self.set_clickable(True)

    def disable(self):
        """(Override) Grays the square out when disabled."""
        super().disable()
        self.canvas.itemconfig(self.tag_name, fill=self.disabled_color)

    def enable(self):
        """(Override) Restores the normal color when re-enabled."""
        super().enable()
        self.canvas.itemconfig(self.tag_name, fill=self.normal_color)

    def set_temp_color(self, color):
        """Temporarily repaints the square (e.g. to mark a winning path)."""
        self.canvas.itemconfig(self.tag_name, fill=color)
class BaseScene(tkinter.Canvas):
    """(Abstract) Base class for all scenes.

    Provides factory helpers for the custom canvas widgets and keeps
    the scene contents rescaled when the window is resized.
    """

    def __init__(self, parent):
        """Initializes the scene as a canvas inside *parent*."""
        tkinter.Canvas.__init__(self, parent, bg=C_COLOR_BLUE_LIGHT,
            width=C_WINDOW_WIDTH, height=C_WINDOW_HEIGHT)
        # Track resizes so the content can be scaled proportionally.
        self.bind("<Configure>", self.__on_resize__)
        # Remember the current size for computing scale factors later.
        self.width = C_WINDOW_WIDTH
        self.height = C_WINDOW_HEIGHT

    def __on_resize__(self, event):
        """(Private) Rescales everything when the window is resized."""
        # Ratio of the new width/height to the previous width/height.
        self.wscale = float(event.width) / self.width
        self.hscale = float(event.height) / self.height
        self.width = event.width
        self.height = event.height
        # Resize the canvas itself, then rescale every item tagged "all".
        self.config(width=self.width, height=self.height)
        self.scale("all", 0, 0, self.wscale, self.hscale)

    def create_button(self, x, y, button_text,
        normal_color=C_COLOR_BLUE, hovered_color=C_COLOR_BLUE_DARK,
        normal_text_color=C_COLOR_BLUE_DARK,
        hovered_text_color=C_COLOR_BLUE_LIGHT):
        """Creates and returns a CanvasButton object (not a Tkinter
        canvas item id)."""
        return CanvasButton(self, x, y, button_text,
            normal_color, hovered_color,
            normal_text_color, hovered_text_color)

    def create_square(self, x, y, width,
        normal_color=C_COLOR_BLUE, hovered_color=C_COLOR_BLUE_DARK,
        disabled_color=C_COLOR_BLUE_LIGHT):
        """Creates and returns a CanvasSquare object (not a Tkinter
        canvas item id)."""
        return CanvasSquare(self, x, y, width,
            normal_color, hovered_color, disabled_color)

    def create_clickable_label(self, x, y, button_text,
        normal_color=C_COLOR_BLUE_DARK, hovered_color=C_COLOR_BLUE_LIGHT):
        """Creates and returns a CanvasClickableLabel object (not a
        Tkinter canvas item id)."""
        return CanvasClickableLabel(self, x, y, button_text,
            normal_color, hovered_color)
class WelcomeScene(BaseScene):
    """The first scene shown when the GUI starts."""

    def __init__(self, parent):
        """Initializes the welcome scene."""
        super().__init__(parent)
        # Decorative blue arch across the top of the canvas.
        self.create_arc((-64, -368, C_WINDOW_WIDTH + 64, 192),
            start=0, extent=-180, fill=C_COLOR_BLUE, outline="")
        try:
            # PhotoImages are stored on self so they are not
            # garbage-collected while displayed (Tkinter does not keep
            # its own strong reference).
            self.logo_image = tkinter.PhotoImage(file="res/icon.png")
            # Logo image at the center of the canvas.
            logo = self.create_image((C_WINDOW_WIDTH/2,
                C_WINDOW_HEIGHT/2 - 96), image=self.logo_image)
            self.title_image = tkinter.PhotoImage(file="res/title.png")
            # Title image below the logo.
            title = self.create_image((C_WINDOW_WIDTH/2,
                C_WINDOW_HEIGHT/2 + 48), image=self.title_image)
        except Exception:
            # Narrowed from a bare except: report missing image resources.
            tkinter.messagebox.showerror("Error", "Can't create images.\n" +
                "Please make sure the res folder is in the same directory" +
                " as this script.")
        # Play button switches to the main game scene.
        play_btn = self.create_button(C_WINDOW_WIDTH/2,
            C_WINDOW_HEIGHT/2 + 136, "Play")
        play_btn.command = self.__on_play_clicked__
        # About button switches to the about scene.
        about_btn = self.create_button(C_WINDOW_WIDTH/2,
            C_WINDOW_HEIGHT/2 + 192, "About")
        about_btn.command = self.__on_about_clicked__
        # Tag everything so BaseScene can rescale it on resize.
        self.addtag_all("all")

    def __on_play_clicked__(self):
        """(Private) Switches to the main game scene when the play
        button is clicked."""
        self.pack_forget()
        self.main_game_scene.pack()

    def __on_about_clicked__(self):
        """(Private) Switches to the about scene when the about button
        is clicked."""
        self.pack_forget()
        self.about_scene.pack()
class AboutScene(BaseScene):
    """Scene showing the developer and copyright information."""

    def __init__(self, parent):
        """Initializes the about scene object."""
        super().__init__(parent)
        # Decorative blue arch at the bottom of the canvas.
        self.create_arc((-128, C_WINDOW_HEIGHT - 128,
            C_WINDOW_WIDTH + 128, C_WINDOW_HEIGHT + 368),
            start=0, extent=180, fill=C_COLOR_BLUE, outline="")
        try:
            # PhotoImages are stored on self so they are not
            # garbage-collected while displayed.
            self.charmy_image = tkinter.PhotoImage(file="res/charmy.png")
            # Mascot image on the left of the canvas.
            logo = self.create_image((C_WINDOW_WIDTH/2 - 192,
                C_WINDOW_HEIGHT/2 - 48), image=self.charmy_image)
            self.title_image = tkinter.PhotoImage(file="res/title.png")
            # Shrink the title image to half size.
            self.title_image = self.title_image.subsample(2, 2)
            title = self.create_image((C_WINDOW_WIDTH/2 + 64,
                C_WINDOW_HEIGHT/2 - 160), image=self.title_image)
        except Exception:
            # Narrowed from a bare except: report missing image resources.
            tkinter.messagebox.showerror("Error", "Can't create images.\n" +
                "Please make sure the res folder is in the same directory" +
                " as this script.")
        self.create_text(C_WINDOW_WIDTH/2 - 80, C_WINDOW_HEIGHT/2 - 96,
            font="Helvetica 14", text="Developed by Charlie Chen",
            anchor="w", fill=C_COLOR_BLUE_DARK)
        # Clickable link to the developer's homepage.
        link_charmysoft = self.create_clickable_label(C_WINDOW_WIDTH/2 - 80,
            C_WINDOW_HEIGHT/2 - 64, "http://CharmySoft.com",
            "#0B0080", "#CC2200")
        link_charmysoft.config(anchor="w")
        link_charmysoft.command = self.__on_charmysoft_clicked__
        self.create_text(C_WINDOW_WIDTH/2 - 80, C_WINDOW_HEIGHT/2,
            anchor="w", font="Helvetica 14", fill=C_COLOR_BLUE_DARK,
            text="Tic Tac Toe Online in Python is \n" +
            "open source under the MIT license")
        # Clickable link to the project page.
        link_project = self.create_clickable_label(C_WINDOW_WIDTH/2 - 80,
            C_WINDOW_HEIGHT/2 + 40, "http://CharmySoft.com/app/ttt-python.htm",
            "#0B0080", "#CC2200")
        link_project.config(anchor="w")
        link_project.command = self.__on_project_link_clicked__
        self.create_text(C_WINDOW_WIDTH/2 + 64, C_WINDOW_HEIGHT/2 + 96,
            font="Helvetica 16", text="Copyright (c) 2016 CharmySoft",
            fill=C_COLOR_BLUE_DARK)
        # OK button returns to the welcome scene.
        ok_btn = self.create_button(C_WINDOW_WIDTH/2, C_WINDOW_HEIGHT/2 + 160,
            "OK", C_COLOR_BLUE_DARK, C_COLOR_BLUE_LIGHT, C_COLOR_BLUE_LIGHT,
            C_COLOR_BLUE_DARK)
        ok_btn.command = self.__on_ok_clicked__
        # Tag everything so BaseScene can rescale it on resize.
        self.addtag_all("all")

    def __on_ok_clicked__(self):
        """(Private) Switches back to the welcome scene when the ok
        button is clicked."""
        self.pack_forget()
        self.welcome_scene.pack()

    def __on_charmysoft_clicked__(self):
        """(Private) Opens the CharmySoft about page in the system
        default browser."""
        webbrowser.open("http://www.CharmySoft.com/about.htm")

    def __on_project_link_clicked__(self):
        """(Private) Opens the project page in the system default browser.

        NOTE(review): the label above displays ".../app/ttt-python.htm"
        but this opens ".../ttt-python.htm" (no "/app") — confirm which
        URL is the correct one.
        """
        webbrowser.open("http://www.CharmySoft.com/ttt-python.htm")
class MainGameScene(BaseScene):
    """MainGameScene deals with the game logic.

    The actual protocol handling lives in TTTClientGameGUI, which runs
    on a separate thread (started in pack()) and drives this scene
    through self.client.canvas.
    """

    def __init__(self, parent):
        """Initializes the main game scene object."""
        # Initialize the base scene
        super().__init__(parent);
        # Initialize instance variables
        self.board_grids_power = 3; # Make it a 3x3 grid board
        self.board_width = 256; # The board is 256x256 wide
        # Create a blue arch at the bottom of the canvas
        self.create_arc((-128, C_WINDOW_HEIGHT - 64, C_WINDOW_WIDTH + 128,
            C_WINDOW_HEIGHT + 368), start=0, extent=180, fill=C_COLOR_BLUE,
            outline="");
        # Create the return button
        return_btn = self.create_button(C_WINDOW_WIDTH - 128, 32, "Go back");
        return_btn.command = self.__on_return_clicked__;
        self.draw_board();
        # Text item showing this player's id and role
        player_self_text = self.create_text(96, 128, font="Helvetica 16",
            fill=C_COLOR_BLUE_DARK, tags=("player_self_text"), anchor="n");
        # Text item showing the opponent's id and role
        player_match_text = self.create_text(C_WINDOW_WIDTH - 96, 128,
            font="Helvetica 16", fill=C_COLOR_BLUE_DARK,
            tags=("player_match_text"), anchor="n");
        # Status line at the bottom-left corner
        notif_text = self.create_text(8, C_WINDOW_HEIGHT-8, anchor="sw",
            font="Helvetica 16", fill=C_COLOR_BLUE_DARK, tags=("notif_text"));
        # Set restart button to None so it won't raise AttributeError
        self.restart_btn = None;
        # Tag all of the drawn widgets for later reference
        self.addtag_all("all");

    def pack(self):
        """(Override) When the scene packs, start the client thread."""
        super().pack();
        # Start a new thread to deal with the client communication
        threading.Thread(target=self.__start_client__).start();

    def draw_board(self, board_line_width = 4):
        """Draws the board at the center of the screen, parameter
        board_line_width determines the border line width."""
        # Create squares for the grid board
        # NOTE(review): the index uses a literal 3 (i+j*3) rather than
        # board_grids_power — correct for the 3x3 default, but would
        # break if board_grids_power were ever changed.
        self.squares = [None] * self.board_grids_power ** 2;
        for i in range(0, self.board_grids_power):
            for j in range(0, self.board_grids_power):
                self.squares[i+j*3] = self.create_square(
                    (C_WINDOW_WIDTH - self.board_width)/2 +
                    self.board_width/self.board_grids_power * i +
                    self.board_width / self.board_grids_power / 2,
                    (C_WINDOW_HEIGHT - self.board_width)/2 +
                    self.board_width/self.board_grids_power * j +
                    self.board_width / self.board_grids_power / 2,
                    self.board_width / self.board_grids_power);
                # Disable those squares to make them unclickable until
                # it is this player's turn
                self.squares[i+j*3].disable();
        # Draw the border lines
        for i in range(1, self.board_grids_power):
            # Draw horizontal lines
            self.create_line((C_WINDOW_WIDTH - self.board_width)/2,
                (C_WINDOW_HEIGHT - self.board_width)/2 +
                self.board_width/self.board_grids_power * i,
                (C_WINDOW_WIDTH + self.board_width)/2,
                (C_WINDOW_HEIGHT - self.board_width)/2 +
                self.board_width/self.board_grids_power * i,
                fill=C_COLOR_BLUE_DARK, width=board_line_width);
            # Draw vertical lines
            self.create_line((C_WINDOW_WIDTH - self.board_width)/2 +
                self.board_width/self.board_grids_power * i,
                (C_WINDOW_HEIGHT - self.board_width)/2,
                (C_WINDOW_WIDTH - self.board_width)/2 +
                self.board_width/self.board_grids_power * i,
                (C_WINDOW_HEIGHT + self.board_width)/2,
                fill=C_COLOR_BLUE_DARK, width=board_line_width);

    def __start_client__(self):
        """(Private) Starts the client side; runs on a worker thread."""
        # Initialize the client object
        self.client = TTTClientGameGUI();
        # Gives the client a reference to self
        self.client.canvas = self;
        try:
            # Get the host IP address
            host = socket.gethostbyname('s.CharmySoft.com');
        except:
            # If can't get the host IP from the domain
            tkinter.messagebox.showerror("Error", "Failed to get the game "+
                "host address from the web domain.\n" +
                "Plase check your connection.");
            self.__on_return_clicked__();
            return;
        # Set the notif text
        self.set_notif_text("Connecting to the game server " + host + "...");
        # NOTE(review): the resolved host above is displayed but NOT used —
        # the connection below targets a hard-coded 127.0.0.1:2019. This
        # looks like a leftover local-testing override; confirm the
        # intended server address and port.
        # Connect to the server
        if(self.client.connect("127.0.0.1", "2019")):
            # If connected to the server
            # Start the game
            self.client.start_game();
        # Close the client
        self.client.close();

    def __on_return_clicked__(self):
        """(Private) Switches back to the welcome scene when the return
        button is clicked."""
        # Clear screen
        self.__clear_screen();
        # Set the client to None so the client thread will stop due to error
        self.client.client_socket = None;
        self.client = None;
        # Switch to the welcome scene
        self.pack_forget();
        self.welcome_scene.pack();

    def set_notif_text(self, text):
        """Sets the notification text."""
        self.itemconfig("notif_text", text=text);

    def update_board_content(self, board_string):
        """Redraws the board content with new board_string."""
        if(len(board_string) != self.board_grids_power ** 2):
            # If board_string is invalid
            print("The board string should be " +
                str(self.board_grids_power ** 2) + " characters long.");
            # Throw an error
            raise Exception;
        # Delete everything on the board
        self.delete("board_content");
        p = 16; # Padding inside each cell
        # Draw the board content
        for i in range(0, self.board_grids_power):
            for j in range(0, self.board_grids_power):
                if(board_string[i+j*3] == "O"):
                    # If this is an "O"
                    self.create_oval(
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * i + p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * j + p,
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (i + 1) - p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (j + 1) - p,
                        fill="", outline=C_COLOR_BLUE_DARK, width=4,
                        tags="board_content");
                elif(board_string[i+j*3] == "X"):
                    # If this is an "X": draw the two diagonal strokes
                    self.create_line(
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * i + p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * j + p,
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (i + 1) - p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (j + 1) - p,
                        fill=C_COLOR_BLUE_DARK, width=4,
                        tags="board_content");
                    self.create_line(
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (i + 1) - p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * j + p,
                        (C_WINDOW_WIDTH - self.board_width)/2 +
                        self.board_width/self.board_grids_power * i + p,
                        (C_WINDOW_HEIGHT - self.board_width)/2 +
                        self.board_width/self.board_grids_power * (j + 1) - p,
                        fill=C_COLOR_BLUE_DARK, width=4,
                        tags="board_content");

    def draw_winning_path(self, winning_path):
        """Marks on the board the path that leads to the win result."""
        # Loop through the board
        for i in range(0, self.board_grids_power ** 2):
            if str(i) in winning_path:
                # If the current item is in the winning path
                self.squares[i].set_temp_color("#db2631");

    def show_restart(self):
        """Creates a restart button for the user to choose to restart a
        new game."""
        self.restart_btn = self.create_button(C_WINDOW_WIDTH/2, C_WINDOW_HEIGHT - 32,
            "Restart", C_COLOR_BLUE_DARK, C_COLOR_BLUE_LIGHT, C_COLOR_BLUE_LIGHT,
            C_COLOR_BLUE_DARK);
        self.restart_btn.command = self.__on_restart_clicked__;

    def __clear_screen(self):
        """(Private) Clears all the existing content from the old game."""
        # Clear everything from the past game
        for i in range(0, self.board_grids_power ** 2):
            self.squares[i].disable();
            self.squares[i].set_temp_color(C_COLOR_BLUE_LIGHT);
        self.update_board_content(" " * self.board_grids_power ** 2);
        self.itemconfig("player_self_text", text="");
        self.itemconfig("player_match_text", text="");
        # Delete the restart button from the scene, if present
        if self.restart_btn is not None:
            self.restart_btn.delete();
            self.restart_btn = None;

    def __on_restart_clicked__(self):
        """(Private) Starts a new game when the restart button is
        clicked."""
        # Clear screen
        self.__clear_screen();
        # Start a new thread to deal with the client communication
        threading.Thread(target=self.__start_client__).start();
class TTTClientGameGUI(TTTClientGame):
    """The client game logic bound to the Tkinter GUI.

    Overrides the hooks of the command-line TTTClientGame so all user
    interaction goes through the MainGameScene canvas, which is injected
    as self.canvas before the game starts.
    """

    def __connect_failed__(self):
        """(Override) Notifies the user that the connection couldn't be
        established, then aborts the client thread."""
        self.canvas.set_notif_text("Can't connect to the game server.\n" +
            "It might be down or blocked by your firewall.")
        # Raising ends the client thread started by the scene.
        raise Exception

    def __connected__(self):
        """(Override) Notifies the user that the connection is up."""
        self.canvas.set_notif_text("Server connected. \n" +
            "Waiting for other players to join...")

    def __game_started__(self):
        """(Override) Shows both players' ids and roles once matched."""
        self.canvas.set_notif_text("Game started. " +
            "You are the \"" + self.role + "\"")
        self.canvas.itemconfig("player_self_text",
            text="You:\n\nPlayer " + str(self.player_id) +
            "\n\nRole: " + self.role)
        self.canvas.itemconfig("player_match_text",
            text="Opponent:\n\nPlayer " + str(self.match_id) +
            "\n\nRole: " + ("O" if self.role == "X" else "X"))

    def __update_board__(self, command, board_string):
        """(Override) Redraws the board; on a result command ("D", "W"
        or "L") also shows the outcome and the restart button."""
        # Let the base class print the command-line board for debugging.
        super().__update_board__(command, board_string)
        self.canvas.update_board_content(board_string)
        if command == "D":
            # The game ended in a draw
            self.canvas.set_notif_text("It's a draw.")
            self.canvas.show_restart()
        elif command == "W":
            # This player won
            self.canvas.set_notif_text("You WIN!")
            self.canvas.show_restart()
        elif command == "L":
            # This player lost
            self.canvas.set_notif_text("You lose.")
            self.canvas.show_restart()

    def __player_move__(self, board_string):
        """(Override) Enables the empty squares and blocks until the
        user clicks one (clicks arrive on the Tkinter main thread)."""
        import time  # Local import; the module does not import time globally.
        self.making_move = True
        for i in range(0, self.canvas.board_grids_power ** 2):
            if board_string[i] == " ":
                # Empty cell: make it clickable.
                self.canvas.squares[i].enable()
                # Bind i (and self) as defaults so each lambda keeps its
                # own index despite Python's late-binding closures.
                self.canvas.squares[i].command = (lambda self=self, i=i:
                    self.__move_made__(i))
        while self.making_move:
            # Bug fix: the original spun with `pass`, pinning a CPU core
            # for the whole turn. A short sleep performs the same wait
            # without the busy loop.
            time.sleep(0.05)

    def __player_wait__(self):
        """(Override) Tells the user we're waiting on the opponent."""
        # Print the command-line notif for debugging purpose
        super().__player_wait__()
        self.canvas.set_notif_text("Waiting for the other player to make a move...")

    def __opponent_move_made__(self, move):
        """(Override) Shows the opponent's move and prompts for ours."""
        # Print the command-line notif for debugging purpose
        super().__opponent_move_made__(move)
        self.canvas.set_notif_text("Your opponent took up number " + str(move) + ".\n"
            "It's now your turn, please make a move.")

    def __move_made__(self, index):
        """(Private) Click handler: sends the chosen position (1-based)
        to the server and releases the wait loop in __player_move__."""
        print("User chose " + str(index + 1))
        for i in range(0, self.canvas.board_grids_power ** 2):
            # Lock the board again until the next turn.
            self.canvas.squares[i].disable()
            self.canvas.squares[i].command = None
        # Send the position back to the server
        self.s_send("i", str(index + 1))
        # Unblock the client thread waiting in __player_move__
        self.making_move = False

    def __draw_winning_path__(self, winning_path):
        """(Override) Highlights the squares that decided the game."""
        # Print the command-line winning path for debugging purpose
        super().__draw_winning_path__(winning_path)
        self.canvas.draw_winning_path(winning_path)
# Define the main program
def main():
    """Builds the Tkinter window, wires the scenes together and runs
    the main event loop."""
    root = tkinter.Tk()
    root.title("Tic Tac Toe")
    # Keep the window from being shrunk below a usable size.
    root.minsize(C_WINDOW_MIN_WIDTH, C_WINDOW_MIN_HEIGHT)
    root.geometry(str(C_WINDOW_WIDTH) + "x" + str(C_WINDOW_HEIGHT))
    try:
        root.iconbitmap("res/icon.ico")
    except tkinter.TclError:
        # Narrowed from a bare except: iconbitmap raises TclError when
        # the file is missing or the platform lacks .ico support.
        # tkinter.messagebox.showerror("Error", "Can't set the window icon.");
        print("Can't set the window icon.")
    # Build the three scenes.
    welcome_scene = WelcomeScene(root)
    about_scene = AboutScene(root)
    main_game_scene = MainGameScene(root)
    # Cross-references used by the scenes to switch between each other.
    welcome_scene.about_scene = about_scene
    welcome_scene.main_game_scene = main_game_scene
    about_scene.welcome_scene = welcome_scene
    main_game_scene.welcome_scene = welcome_scene
    # Start showing the welcome scene.
    welcome_scene.pack()
    # Main loop
    root.mainloop()
if __name__ == "__main__":
# If this script is running as a standalone program,
# start the main program.
main(); |
gong_obs_plugin.py | from flask import Flask, render_template, jsonify, request
import logging
import os
import requests
import threading
from os.path import normpath, realpath, dirname
### OBS hooks
def script_description():
    """Return the HTML description shown in OBS's Scripts dialog."""
    # Adjacent string literals are concatenated at compile time.
    return (
        "<b>Gong OBS plugin</b>"
        "<hr>"
        "Script to receive web commands at http://localhost:28000/"
        "<br/><br/>"
        "© 2020 Richard Gong"
    )
def script_update(settings):
    """OBS hook: called whenever the user changes a script setting.

    NOTE: `obs` is injected into the global namespace by the OBS Python
    scripting host; it is not imported in this file.
    """
    test_input = obs.obs_data_get_string(settings, 'test_input')
    logging.info(f"GONG) Test input: {test_input}")
def script_properties():
    """OBS hook: declare the script's settings UI (a single text field
    named "test_input")."""
    props = obs.obs_properties_create()
    obs.obs_properties_add_text(props, "test_input", "Test input", obs.OBS_TEXT_DEFAULT)
    return props
def script_load(settings):
    """OBS hook: called once when the script is loaded."""
    logging.info("GONG) loaded")
def script_unload():
    """OBS hook: called on unload; asks the embedded Flask server to
    shut down by hitting its /kill route."""
    requests.get(f'http://localhost:{PORT}/kill')
### Setup
# Directory containing this script; used to resolve bundled resources.
PROJECT_PATH = dirname(realpath(__file__))
app = Flask(__name__)
# Running directly => dev mode; inside OBS __name__ is the module name.
IS_DEV = __name__ == '__main__'
# Different port in dev so it doesn't clash with the OBS-hosted instance.
PORT = 8080 if IS_DEV else 28000
log_level = logging.INFO
# Quiet Flask's per-request logging; configure the root logger ourselves.
logging.getLogger('werkzeug').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(log_level)
log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s:%(lineno)d %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_formatter)
# Replace any pre-existing handlers so output format is predictable.
logger.handlers.clear()
logger.addHandler(stream_handler)
### Helpers
def get_absolute_path(relative_path):
    """Resolve *relative_path* against the script directory and
    normalize the result."""
    joined = os.path.join(PROJECT_PATH, relative_path)
    return normpath(joined)
# Absolute path to the Windows text-to-speech helper script.
SPEAK_VBS_PATH = get_absolute_path('speak.vbs')
def get_debug_info():
    """Log and return a one-line snapshot of the runtime configuration."""
    info = f"GONG) os.name={os.name} __name__={__name__}. IS_DEV={IS_DEV} PORT={PORT} CWD={os.getcwd()} __file__={__file__} PROJECT_PATH={PROJECT_PATH} SPEAK_VBS_PATH={SPEAK_VBS_PATH}"
    logging.info(info)
    return info

# Record the environment once at import time to ease debugging.
get_debug_info()
def say(s):
    """Speak *s* aloud via the OS text-to-speech facility and return *s*.

    Windows uses the bundled speak.vbs through cscript; elsewhere the `say`
    CLI is used at half volume (macOS syntax — TODO confirm on Linux).
    """
    if os.name == 'nt':
        # NOTE(review): `s` is interpolated into a shell command unescaped.
        # Fine for the internal literals used here ('Go', 'Stop', ...), but
        # not safe for untrusted input.
        cmd = f'cscript {SPEAK_VBS_PATH} "{s}"'
    else:
        cmd = f"say '[[volm 0.50]] {s}'"
    logging.info(f"Saying: {cmd}")
    os.system(cmd)
    return s
### Routes
@app.route("/")
def home_view():
    """Render the control-panel page, embedding the current debug info."""
    info = get_debug_info()
    return render_template('home.html', info=info)
@app.route("/record-toggle")
def record_toggle_view():
    """Flip OBS recording on/off and report the new state as JSON."""
    recording = not obs.obs_frontend_recording_active()
    if recording:
        obs.obs_frontend_recording_start()
        msg = "Recording started"
    else:
        obs.obs_frontend_recording_stop()
        msg = "Recording stopped"
    return jsonify(msg=msg, on=recording)
@app.route("/pause-toggle")
def pause_toggle_view():
    """Toggle pause on the current recording.

    Starts a new recording (announcing 'Go') if none is active; otherwise
    flips the paused state. Responds with the spoken message and whether
    recording is live (`on`).
    """
    recording = obs.obs_frontend_recording_active()
    msg = None
    if not recording:
        msg = say('Go')
        obs.obs_frontend_recording_start()
        paused = False
    else:
        paused = not obs.obs_frontend_recording_paused()
        # 'Going' is spoken *before* resuming and 'Stop' *after* pausing —
        # presumably so the speech stays out of the recording; confirm intent.
        if not paused:
            msg = say('Going')
        obs.obs_frontend_recording_pause(paused)
        if paused:
            msg = say('Stop')
    return jsonify(msg=msg, on=not paused)
@app.route("/stop")
def stop_view():
    """Pause the active recording ('stop'); no-op if not recording or already paused."""
    if not obs.obs_frontend_recording_active():
        return jsonify(msg="Already not recording.", on=False)
    if obs.obs_frontend_recording_paused():
        return jsonify(msg="Already paused.", on=False)
    obs.obs_frontend_recording_pause(True)
    return jsonify(msg=say("Stop"), on=False)
@app.route("/start")
def start_view():
    """Start a new recording or resume a paused one; no-op when already live."""
    logging.info("Request: start_view...")
    if not obs.obs_frontend_recording_active():
        logging.info("...Start recording!")
        spoken = say('Start')
        obs.obs_frontend_recording_start()
        return jsonify(msg=spoken, on=True)
    if obs.obs_frontend_recording_paused():
        logging.info("...Continue recording!")
        spoken = say('Go')
        obs.obs_frontend_recording_pause(False)
        return jsonify(msg=spoken, on=True)
    logging.info("...Already recording!")
    return jsonify(msg="Already recording", on=True)
@app.route("/kill")
def kill_view():
    """Shut down the embedded Flask server (hit by script_unload)."""
    # NOTE(review): 'werkzeug.server.shutdown' was removed in Werkzeug 2.1+;
    # this only works with older development-server versions.
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
    return render_template('msg.html', msg='Server killed.')
### This doesn't really work here. use volume_listen.py
# from pynput import keyboard
# def on_hotkey():
# logging.info("GONG) Hotkey detected")
# obs.obs_frontend_recording_start()
### Start server
def run_server(debug=False):
    """Serve the Flask app on all interfaces at the configured PORT (blocking)."""
    app.run(host='0.0.0.0', port=PORT, debug=debug)
# Both the `if IS_DEV` and `else` branches were byte-identical, so the
# conditional is collapsed: in every mode the server runs in a background
# thread so neither OBS's script_load nor a standalone run is blocked.
# run_server(debug=True)  # dev alternative: blocking server with reloader
threading.Thread(target=run_server, kwargs=dict(debug=False)).start()
# Imported last and unconditionally, as before: obspython only exists inside
# OBS's embedded interpreter, so a standalone run still fails here.
# NOTE(review): consider guarding with try/except ImportError for dev mode.
import obspython as obs
|
minion.py | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt._compat import ipaddress
from salt.utils.network import parse_host_port
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
SaltMasterUnresolvableError
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
    '''
    Resolves the master_ip and master_uri options

    :param dict opts: minion configuration; reads ``master``, ``master_port``,
        ``retry_dns``, ``retry_dns_count``, ``ipv6`` and the ``source_*`` keys.
    :param bool fallback: when resolution fails and DNS retries are disabled,
        fall back to 127.0.0.1 instead of re-raising SaltClientError.
    :return: dict with ``master_ip`` and ``master_uri``, plus optional
        ``source_ip``/``source_ret_port``/``source_publish_port``.
    '''
    ret = {}
    check_dns = True
    # Masterless (local file_client) minions skip DNS entirely.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False
    # Since salt.log is imported below, salt.utils.network needs to be imported here as well
    import salt.utils.network

    if check_dns is True:
        try:
            if opts['master'] == '':
                raise SaltSystemExit
            ret['master_ip'] = salt.utils.network.dns_check(
                opts['master'],
                int(opts['master_port']),
                True,
                opts['ipv6'])
        except SaltClientError:
            retry_dns_count = opts.get('retry_dns_count', None)
            if opts['retry_dns']:
                # Keep retrying every `retry_dns` seconds, optionally bounded
                # by `retry_dns_count` attempts.
                while True:
                    if retry_dns_count is not None:
                        if retry_dns_count == 0:
                            raise SaltMasterUnresolvableError
                        retry_dns_count -= 1
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found or not responsive. '
                           'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.setup.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.network.dns_check(
                            opts['master'],
                            int(opts['master_port']),
                            True,
                            opts['ipv6'])
                        break
                    except SaltClientError:
                        pass
            else:
                if fallback:
                    ret['master_ip'] = '127.0.0.1'
                else:
                    raise
        except SaltSystemExit:
            # Raised above for an empty 'master' value: fatal config error.
            unknown_str = 'unknown address'
            master = opts.get('master', unknown_str)
            if master == '':
                master = unknown_str
            if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'master\' value in minion config.'.format(master)
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'

    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning(
                'Master ip address changed from %s to %s',
                opts['master_ip'], ret['master_ip']
            )
    # Optional explicit source interface/address/ports for outbound sockets.
    if opts['source_interface_name']:
        log.trace('Custom source interface required: %s', opts['source_interface_name'])
        interfaces = salt.utils.network.interfaces()
        log.trace('The following interfaces are available on this Minion:')
        log.trace(interfaces)
        if opts['source_interface_name'] in interfaces:
            if interfaces[opts['source_interface_name']]['up']:
                addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
                    interfaces[opts['source_interface_name']]['inet6']
                ret['source_ip'] = addrs[0]['address']
                log.debug('Using %s as source IP address', ret['source_ip'])
            else:
                log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
                            opts['source_interface_name'])
        else:
            log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
    elif opts['source_address']:
        ret['source_ip'] = salt.utils.network.dns_check(
            opts['source_address'],
            int(opts['source_ret_port']),
            True,
            opts['ipv6'])
        log.debug('Using %s as source IP address', ret['source_ip'])
    if opts['source_ret_port']:
        ret['source_ret_port'] = int(opts['source_ret_port'])
        log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
    if opts['source_publish_port']:
        ret['source_publish_port'] = int(opts['source_publish_port'])
        log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(
        ip=ret['master_ip'], port=opts['master_port'])
    log.debug('Master URI: %s', ret['master_uri'])

    return ret
def prep_ip_port(opts):
    '''
    Split ``opts['master']`` into host and optional port.

    Returns a dict with 'master' (hostname, IP string, or — for the
    'ip_only' URI format — an ipaddress object) and, when a port was
    present, an integer 'master_port'. e.g.:

    - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
    - master: '[::1]:1234'     -> {'master': '::1', 'master_port': 1234}
    - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
    '''
    if opts['master_uri_format'] == 'ip_only':
        # Bare-IP format: validate via the ipaddress module. Bracketed
        # values such as '[::1]:1234' are rejected here (is_ipv6-style
        # checks treat them as non-addresses), so no port handling applies.
        return {'master': ipaddress.ip_address(opts['master'])}
    host, port = parse_host_port(opts['master'])
    result = {'master': host}
    if port:
        result['master_port'] = port
    return result
def get_proc_dir(cachedir, **kwargs):
    '''
    Return the path of the 'proc' directory under *cachedir*, creating it
    when missing.

    Optional keyword arguments:

    mode: permissions for the directory (anything os.makedirs accepts);
        also re-applied to an already-existing directory that drifted.
    uid / gid: ownership to enforce (int). Unset, None-like or -1 means
        "leave as is". Only effective on platforms providing os.chown.
    '''
    proc_dir = os.path.join(cachedir, 'proc')
    requested_mode = kwargs.pop('mode', None)
    # Normalized into kwargs form for os.makedirs: {} means "no explicit mode".
    mode = {} if requested_mode is None else {'mode': requested_mode}

    if not os.path.isdir(proc_dir):
        # Not present yet: create it, honoring an explicit mode if given.
        os.makedirs(proc_dir, **mode)

    d_stat = os.stat(proc_dir)

    if mode:
        # An explicit mode was requested — re-apply it when the directory's
        # current permission bits differ (pre-existing dir, umask effects).
        current = S_IMODE(d_stat.st_mode)
        if current != mode['mode']:
            os.chmod(proc_dir, (d_stat.st_mode ^ current) | mode['mode'])

    if hasattr(os, 'chown'):
        # Ownership enforcement exists only on unix-like platforms.
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)
        # Skip the syscall when ownership already matches or both ids are -1.
        if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(proc_dir, uid, gid)

    return proc_dir
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    :param func: the target function (inspected via its argspec).
    :param args: mixed positional/keyword input; kwargs appear either as
        dicts carrying ``__kwarg__: True`` or as ``key=value`` strings.
    :param data: optional publish data; packed into ``__pub_*`` kwargs when
        *func* accepts ``**kwargs``.
    :param ignore_invalid: when False, invalid kwargs raise via
        salt.utils.args.invalid_kwargs.
    :return: tuple ``(_args, _kwargs)`` ready for ``func(*_args, **_kwargs)``.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then its a kwarg
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue

        else:
            # Maybe a "key=value" string kwarg; parse_input separates it out.
            string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
            if string_kwarg:
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
            else:
                _args.append(arg)

    if invalid_kwargs and not ignore_invalid:
        salt.utils.args.invalid_kwargs(invalid_kwargs)

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs
def eval_master_func(opts):
    '''
    When master_type is 'func', call the configured ``module.function`` and
    store its return value in ``opts['master']``. Runs at most once per opts
    dict, guarded by the '__master_func_evaluated' flag.
    '''
    if '__master_func_evaluated' in opts:
        return
    # "module.function" -> load just that function through the raw loader
    mod_fun = opts['master']
    mod, fun = mod_fun.split('.')
    try:
        master_mod = salt.loader.raw_mod(opts, mod, fun)
        if not master_mod:
            raise KeyError
        # Whatever the module returns becomes the master address
        opts['master'] = master_mod[mod_fun]()
        # Only a string or a list of masters is acceptable
        if not isinstance(opts['master'], (six.string_types, list)):
            raise TypeError
        opts['__master_func_evaluated'] = True
    except KeyError:
        log.error('Failed to load module %s', mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except TypeError:
        log.error('%s returned from %s is not a string', opts['master'], mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
    '''
    Map a logical master event name ('connected', 'disconnected', 'failback',
    'alive') to its internal event tag. For 'alive' with a specific master,
    the master id is appended to the tag.
    '''
    tags = {
        'connected': '__master_connected',
        'disconnected': '__master_disconnected',
        'failback': '__master_failback',
        'alive': '__master_alive',
    }
    tag = tags.get(type)
    if type == 'alive' and master is not None:
        return '{0}_{1}'.format(tag, master)
    return tag
def service_name():
    '''
    Return the platform-appropriate minion service name (BSD platforms use
    an underscore, everything else a dash).
    '''
    if 'bsd' in sys.platform:
        return 'salt_minion'
    return 'salt-minion'
class MinionBase(object):
    '''
    Shared base for the minion flavors below: stores the configuration dict
    and provides master-resolution, scheduler and beacon helpers.
    '''
    def __init__(self, opts):
        # opts: the (possibly post-processed) minion configuration dict
        self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
    @tornado.gen.coroutine
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False,
                    failback=False):
        '''
        Evaluates and returns a tuple of the current master address and the pub_channel.

        In standard mode, just creates a pub_channel with the given master address.

        With master_type=func evaluates the current master address from the given
        module and then creates a pub_channel.

        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.

        :param dict opts: minion configuration (mutated in place: master list,
            master_uri_list, local_masters, master_list, ...).
        :param int timeout: channel-factory timeout, in seconds.
        :param bool safe: passed through to the channel factory.
        :param bool failed: True when re-evaluating after a connection loss.
        :param bool failback: True when failing back to the original master list.
        '''
        # return early if we are not connecting to a master
        if opts['master_type'] == 'disable':
            log.warning('Master is set to disable, skipping connection')
            self.connected = False
            raise tornado.gen.Return((None, None))

        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters.
        self._discover_masters()

        # check if master_type was altered from its default
        if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                eval_master_func(opts)

            # if failover or distributed is set, master has to be of type list
            elif opts['master_type'] in ('failover', 'distributed'):
                if isinstance(opts['master'], list):
                    log.info(
                        'Got list of available master addresses: %s',
                        opts['master']
                    )

                    if opts['master_type'] == 'distributed':
                        master_len = len(opts['master'])
                        if master_len > 1:
                            secondary_masters = opts['master'][1:]
                            # Deterministically pick a preferred master from the
                            # minion id's crc32, then put the rest after it.
                            master_idx = crc32(opts['id']) % master_len
                            try:
                                preferred_masters = opts['master']
                                preferred_masters[0] = opts['master'][master_idx]
                                preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
                                opts['master'] = preferred_masters
                                log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
                            except (KeyError, AttributeError, TypeError):
                                log.warning('Failed to distribute to a specific master.')
                        else:
                            log.warning('master_type = distributed needs more than 1 master.')

                    if opts['master_shuffle']:
                        if opts['master_failback']:
                            # keep the primary first; shuffle only the backups
                            secondary_masters = opts['master'][1:]
                            shuffle(secondary_masters)
                            opts['master'][1:] = secondary_masters
                        else:
                            shuffle(opts['master'])
                    opts['auth_tries'] = 0
                    if opts['master_failback'] and opts['master_failback_interval'] == 0:
                        opts['master_failback_interval'] = opts['master_alive_interval']
                # if opts['master'] is a str and we have never created opts['master_list']
                elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
                    # We have a string, but a list was what was intended. Convert.
                    # See issue 23611 for details
                    opts['master'] = [opts['master']]
                elif opts['__role'] == 'syndic':
                    log.info('Syndic setting master_syndic to \'%s\'', opts['master'])

                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    if failback:
                        # failback list of masters to original config
                        opts['master'] = opts['master_list']
                    else:
                        log.info(
                            'Moving possibly failed master %s to the end of '
                            'the list of masters', opts['master']
                        )
                        if opts['master'] in opts['local_masters']:
                            # create new list of master with the possibly failed
                            # one moved to the end
                            failed_master = opts['master']
                            opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
                            opts['master'].append(failed_master)
                        else:
                            opts['master'] = opts['master_list']
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
                # See issue 21082 for details
                if opts['retry_dns'] and opts['master_type'] == 'failover':
                    msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
                           'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
                    log.critical(msg)
                    opts['retry_dns'] = 0
            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)

        # FIXME: if SMinion don't define io_loop, it can't switch master see #29088
        # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
        # (The channel factories will set a default if the kwarg isn't passed)
        factory_kwargs = {'timeout': timeout, 'safe': safe}
        if getattr(self, 'io_loop', None):
            factory_kwargs['io_loop'] = self.io_loop  # pylint: disable=no-member

        tries = opts.get('master_tries', 1)
        attempts = 0

        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if isinstance(opts['master'], list):
            conn = False
            # shuffle the masters and then loop through them
            opts['local_masters'] = copy.copy(opts['master'])
            if opts['random_master']:
                shuffle(opts['local_masters'])
            last_exc = None
            opts['master_uri_list'] = list()

            # This sits outside of the connection loop below because it needs to set
            # up a list of master URIs regardless of which masters are available
            # to connect _to_. This is primarily used for masterless mode, when
            # we need a list of master URIs to fire calls back to.
            for master in opts['local_masters']:
                opts['master'] = master
                opts.update(prep_ip_port(opts))
                opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])

            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                for master in opts['local_masters']:
                    opts['master'] = master
                    opts.update(prep_ip_port(opts))
                    opts.update(resolve_dns(opts))

                    # on first run, update self.opts with the whole master list
                    # to enable a minion to re-use old masters if they get fixed
                    if 'master_list' not in opts:
                        opts['master_list'] = copy.copy(opts['local_masters'])

                    self.opts = opts

                    try:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
                        yield pub_channel.connect()
                        conn = True
                        break
                    except SaltClientError as exc:
                        last_exc = exc
                        if exc.strerror.startswith('Could not access'):
                            msg = (
                                'Failed to initiate connection with Master '
                                '%s: check ownership/permissions. Error '
                                'message: %s', opts['master'], exc
                            )
                        else:
                            msg = ('Master %s could not be reached, trying next '
                                   'next master (if any)', opts['master'])
                        log.info(msg)
                        continue

                if not conn:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        self.opts['master'] = copy.copy(self.opts['local_masters'])
                        log.error(
                            'No master could be reached or all masters '
                            'denied the minion\'s connection attempt.'
                        )
                        # If the code reaches this point, 'last_exc'
                        # should already be set.
                        raise last_exc  # pylint: disable=E0702
                else:
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))

        # single master sign in
        else:
            if opts['random_master']:
                log.warning('random_master is True but there is only one master specified. Ignoring.')
            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                opts.update(prep_ip_port(opts))
                opts.update(resolve_dns(opts))
                try:
                    if self.opts['transport'] == 'detect':
                        # Try each available transport until one authenticates.
                        self.opts['detect_mode'] = True
                        for trans in ('zeromq', 'tcp'):
                            if trans == 'zeromq' and not zmq:
                                continue
                            self.opts['transport'] = trans
                            pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                            yield pub_channel.connect()
                            if not pub_channel.auth.authenticated:
                                continue
                            del self.opts['detect_mode']
                            break
                    else:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                        yield pub_channel.connect()
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))
                except SaltClientError as exc:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        raise exc
    def _discover_masters(self):
        '''
        Discover master(s) and decide where to connect, if SSDP is around.

        This modifies the configuration on the fly: only runs when 'master'
        is still the default value and discovery is enabled; the first
        discovered master matching the configured mapping policy replaces
        ``self.opts['master']``.
        :return: None
        '''
        if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
            master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
            masters = {}
            for att in range(self.opts['discovery'].get('attempts', 3)):
                try:
                    att += 1
                    log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
                    masters.update(master_discovery_client.discover())
                    if not masters:
                        # Nothing found yet; wait before the next attempt.
                        time.sleep(self.opts['discovery'].get('pause', 5))
                    else:
                        break
                except Exception as err:
                    log.error('SSDP discovery failure: {0}'.format(err))
                    break

            if masters:
                policy = self.opts.get('discovery', {}).get('match', 'any')
                if policy not in ['any', 'all']:
                    log.error('SSDP configuration matcher failure: unknown value "{0}". '
                              'Should be "any" or "all"'.format(policy))
                else:
                    mapping = self.opts['discovery'].get('mapping', {})
                    for addr, mappings in masters.items():
                        for proto_data in mappings:
                            # Count how many configured mapping keys this
                            # advertisement satisfies.
                            cnt = len([key for key, value in mapping.items()
                                       if proto_data.get('mapping', {}).get(key) == value])
                            if policy == 'any' and bool(cnt) or cnt == len(mapping):
                                self.opts['master'] = proto_data['master']
                                return
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc.  The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        import salt.loader
        opts['grains'] = salt.loader.grains(opts)
        super(SMinion, self).__init__(opts)

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            install_zmq()
            io_loop = ZMQDefaultLoop.current()
            # Resolve/connect to a master synchronously before loading modules.
            io_loop.run_sync(
                lambda: self.eval_master(self.opts, failed=True)
            )
        self.gen_modules(initial_load=True)

        # If configured, cache pillar data on the minion
        if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
            import salt.utils.yaml
            pdir = os.path.join(self.opts['cachedir'], 'pillar')
            if not os.path.isdir(pdir):
                os.makedirs(pdir, 0o700)
            ptop = os.path.join(pdir, 'top.sls')
            if self.opts['saltenv'] is not None:
                penv = self.opts['saltenv']
            else:
                penv = 'base'
            # Minimal top file pointing this minion id at the cached pillar.
            cache_top = {penv: {self.opts['id']: ['cache']}}
            with salt.utils.files.fopen(ptop, 'wb') as fp_:
                salt.utils.yaml.safe_dump(cache_top, fp_)
            os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, 'cache.sls')
            with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
                salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
            os.chmod(cache_sls, 0o600)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        # Pillar first: several loaders below consult self.opts['pillar'].
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
        self.serializers = salt.loader.serializers(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
        # TODO: remove
        self.function_errors = {}  # Keep the funcs clean
        self.states = salt.loader.states(self.opts,
                                         self.functions,
                                         self.utils,
                                         self.serializers)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # Allow callers to refresh everything via sys.reload_modules.
        self.functions['sys.reload_modules'] = self.gen_modules
        self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,   # load returner modules
            states=True,      # load state modules
            rend=True,        # load render modules
            matcher=True,     # build a Matcher
            whitelist=None,   # optional module whitelist for the loader
            ignore_config_errors=True):
        # Re-read the minion config from the master's conf_file, then let the
        # caller-supplied opts override it.
        self.opts = salt.config.minion_config(
            opts['conf_file'],
            ignore_config_errors=ignore_config_errors,
            role='master'
        )
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        self.opts['pillar'] = {}
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts,
            utils=self.utils,
            whitelist=self.whitelist,
            initial_load=initial_load)
        self.serializers = salt.loader.serializers(self.opts)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts,
                                             self.functions,
                                             self.utils,
                                             self.serializers)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        # Allow callers to refresh everything via sys.reload_modules.
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    def __init__(self, opts):
        super(MinionManager, self).__init__(opts)
        # Backoff bounds for repeated auth attempts against a master.
        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']
        self.minions = []      # successfully connected Minion objects
        self.jid_queue = []    # shared jid queue handed to every minion
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()
        self.process_manager = ProcessManager(name='MultiMinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})  # Tornado backward compat
    def __del__(self):
        # Best-effort cleanup when the manager is garbage collected.
        self.destroy()
    def _bind(self):
        """Wire up the local event bus so events can be observed and fanned out."""
        # start up the event publisher, so we can see events during startup
        self.event_publisher = salt.utils.event.AsyncEventPublisher(
            self.opts,
            io_loop=self.io_loop,
        )
        self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
        # Subscribe to every tag; handle_event forwards to all minions.
        self.event.subscribe('')
        self.event.set_event_handler(self.handle_event)
    @tornado.gen.coroutine
    def handle_event(self, package):
        """Broadcast an event package to every managed minion concurrently."""
        yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _check_minions(self):
'''
Check the size of self.minions and raise an error if it's empty
'''
if not self.minions:
err = ('Minion unable to successfully connect to '
'a Salt Master.')
log.error(err)
def _spawn_minions(self, timeout=60):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self._connect_minion(minion)
self.io_loop.call_later(timeout, self._check_minions)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
self.minions.append(minion)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except SaltMasterUnresolvableError:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
'Set \'master\' value in minion config.'.format(minion.opts['master'])
log.error(err)
break
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
    '''
    Pass in the options dict

    :param dict opts: minion configuration
    :param int timeout: master auth timeout, forwarded to eval_master
    :param bool safe: passed to eval_master; retry-forever behavior
    :param str loaded_base_name: namespace prefix for this minion's loader
    :param io_loop: shared tornado IOLoop (MinionManager passes one in);
        a fresh ZMQ loop is created when None
    :param list jid_queue: shared recent-JID list for job de-duplication
    '''
    # this means that the parent class doesn't know *which* master we connect to
    super(Minion, self).__init__(opts)
    self.timeout = timeout
    self.safe = safe
    self._running = None
    # Windows keeps job process handles here (they can't be join()ed early).
    self.win_proc = []
    self.loaded_base_name = loaded_base_name
    self.connected = False
    self.restart = False
    # Flag meaning minion has finished initialization including first connect to the master.
    # True means the Minion is fully functional and ready to handle events.
    self.ready = False
    self.jid_queue = [] if jid_queue is None else jid_queue
    self.periodic_callbacks = {}
    if io_loop is None:
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()
    else:
        self.io_loop = io_loop
    # Warn if ZMQ < 3.2
    if zmq:
        if ZMQ_VERSION_INFO < (3, 2):
            log.warning(
                'You have a version of ZMQ less than ZMQ 3.2! There are '
                'known connection keep-alive issues with ZMQ < 3.2 which '
                'may result in loss of contact with minions. Please '
                'upgrade your ZMQ!'
            )
    # Late setup of the opts grains, so we can log from the grains
    # module. If this is a proxy, however, we need to init the proxymodule
    # before we can get the grains. We do this for proxies in the
    # post_master_init
    if not salt.utils.platform.is_proxy():
        self.opts['grains'] = salt.loader.grains(opts)
    else:
        # Proxy minions cannot run beacons/scheduler before the proxy module
        # exists, so force these options off.
        if self.opts.get('beacons_before_connect', False):
            log.warning(
                '\'beacons_before_connect\' is not supported '
                'for proxy minions. Setting to False'
            )
            self.opts['beacons_before_connect'] = False
        if self.opts.get('scheduler_before_connect', False):
            log.warning(
                '\'scheduler_before_connect\' is not supported '
                'for proxy minions. Setting to False'
            )
            self.opts['scheduler_before_connect'] = False
    log.info('Creating minion process manager')
    # Optional random startup jitter to avoid thundering-herd reconnects.
    if self.opts['random_startup_delay']:
        sleep_time = random.randint(0, self.opts['random_startup_delay'])
        log.info(
            'Minion sleeping for %s seconds due to configured '
            'startup_delay between 0 and %s seconds',
            sleep_time, self.opts['random_startup_delay']
        )
        time.sleep(sleep_time)
    self.process_manager = ProcessManager(name='MinionProcessManager')
    self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
    # We don't have the proxy setup yet, so we can't start engines
    # Engines need to be able to access __proxy__
    if not salt.utils.platform.is_proxy():
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager)
    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, self._handle_signals)
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
    '''
    SIGINT/SIGTERM handler: stop the minion loop, tear down child
    processes, then exit. Order matters: restarting must be disabled
    before signalling, and kill_children runs last as the hard stop.
    '''
    self._running = False
    # escalate the signals to the process manager
    self.process_manager.stop_restarting()
    self.process_manager.send_signal_to_processes(signum)
    # kill any remaining processes
    self.process_manager.kill_children()
    # Brief grace period so children can exit before the interpreter does.
    time.sleep(1)
    sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
    '''
    Block until we are connected to a master.

    Runs the async connect_master coroutine on the io_loop and stops the
    loop once it completes (or after *timeout* seconds).

    :param timeout: optional seconds to wait before giving up
    :param bool failed: forwarded to connect_master (previous attempt failed)
    :raises SaltDaemonNotRunning: when *timeout* elapses without a
        successful connection
    :raises Exception: whatever the connect future failed with, re-raised
        to preserve restart_on_error behavior
    '''
    self._sync_connect_master_success = False
    log.debug("sync_connect_master")

    def on_connect_master_future_done(future):
        self._sync_connect_master_success = True
        self.io_loop.stop()

    self._connect_master_future = self.connect_master(failed=failed)
    # finish connecting to master
    self._connect_master_future.add_done_callback(on_connect_master_future_done)
    if timeout:
        self.io_loop.call_later(timeout, self.io_loop.stop)
    try:
        self.io_loop.start()
    except KeyboardInterrupt:
        self.destroy()
    # I made the following 3 line oddity to preserve traceback.
    # Please read PR #23978 before changing, hopefully avoiding regressions.
    # Good luck, we're all counting on you.  Thanks.
    if self._connect_master_future.done():
        future_exception = self._connect_master_future.exception()
        if future_exception:
            # This needs to be re-raised to preserve restart_on_error behavior.
            # BUGFIX: Future.exception() returns the exception *instance*, not
            # an (type, value, tb) triple, so the previous
            # ``raise six.reraise(*future_exception)`` blew up with TypeError
            # (exceptions are not iterable) and masked the real failure.
            raise future_exception
    if timeout and self._sync_connect_master_success is False:
        raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
    '''
    Return a future which will complete when you are connected to a master
    '''
    # Resolve which master to talk to and open the pub channel to it.
    eval_result = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
    master, self.pub_channel = eval_result
    # Finish the post-connect setup (modules, pillar, schedule, ...).
    yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
    '''
    Function to finish init after connecting to a master

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to)

    If this function is changed, please check ProxyMinion._post_master_init
    to see if those changes need to be propagated.

    Minions and ProxyMinions need significantly different post master setups,
    which is why the differences are not factored out into separate helper
    functions.

    :param master: resolved master address we just connected to
    '''
    if self.connected:
        self.opts['master'] = master
        # Initialize pillar before loader to make pillar accessible in modules
        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv')
        ).compile_pillar()
    if not self.ready:
        # First connection ever: build functions/beacons/etc. in one place.
        self._setup_core()
    elif self.connected and self.opts['pillar']:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        if hasattr(self, 'schedule'):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    if not hasattr(self, 'schedule'):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[master_event(type='alive')])
    # add default scheduling jobs to the minions scheduler
    if self.opts['mine_enabled'] and 'mine.update' in self.functions:
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': self.opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'run_on_start': True,
                'return_job': self.opts.get('mine_return_job', False)
            }
        }, persist=True)
        log.info('Added mine.update to scheduler')
    else:
        # Mine disabled (or module missing): make sure no stale job persists.
        self.schedule.delete_job('__mine_interval', persist=True)
    # add master_alive job if enabled
    # (tcp transport has its own keepalive, so skip it there)
    if (self.opts['transport'] != 'tcp' and
            self.opts['master_alive_interval'] > 0 and
            self.connected):
        self.schedule.add_job({
            master_event(type='alive', master=self.opts['master']):
            {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
        }, persist=True)
        # Failback pinger only makes sense when we are NOT already on the
        # preferred (first) master of master_list.
        if self.opts['master_failback'] and \
                'master_list' in self.opts and \
                self.opts['master'] != self.opts['master_list'][0]:
            self.schedule.add_job({
                master_event(type='failback'):
                {
                    'function': 'status.ping_master',
                    'seconds': self.opts['master_failback_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master_list'][0]}
                }
            }, persist=True)
        else:
            self.schedule.delete_job(master_event(type='failback'), persist=True)
    else:
        # Alive checks disabled: remove both keepalive-related jobs.
        self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
        self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
    '''
    Return a shallow copy of the opts dict with the 'logger' key
    stripped out, suitable for handing to loaded modules.
    '''
    return dict(
        (key, val)
        for key, val in six.iteritems(self.opts)
        if key != 'logger'
    )
def _load_modules(self, force_refresh=False, notify=False, grains=None):
    '''
    Return the functions and the returners loaded up from the loader
    module

    :param bool force_refresh: force a grains refresh before loading
    :param bool notify: ask the loader to fire a modules-refresh event
    :param grains: when not None, skip regenerating grains entirely
    :return: tuple of (functions, returners, errors, executors)
    '''
    # if this is a *nix system AND modules_max_memory is set, lets enforce
    # a memory limit on module imports
    # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
    modules_max_memory = False
    if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
        log.debug(
            'modules_max_memory set, enforcing a maximum of %s',
            self.opts['modules_max_memory']
        )
        modules_max_memory = True
        # Remember the old limit so it can be restored after loading.
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
        # Cap = current usage + configured headroom.
        mem_limit = rss + vms + self.opts['modules_max_memory']
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif self.opts.get('modules_max_memory', -1) > 0:
        # Configured but unenforceable: explain which dependency is missing.
        if not HAS_PSUTIL:
            log.error('Unable to enforce modules_max_memory because psutil is missing')
        if not HAS_RESOURCE:
            log.error('Unable to enforce modules_max_memory because resource is missing')
    # This might be a proxy minion
    if hasattr(self, 'proxy'):
        proxy = self.proxy
    else:
        proxy = None
    if grains is None:
        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
    self.utils = salt.loader.utils(self.opts, proxy=proxy)
    if self.opts.get('multimaster', False):
        # Deep-copy opts so each master's loader namespace stays isolated.
        s_opts = copy.deepcopy(self.opts)
        functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                            loaded_base_name=self.loaded_base_name, notify=notify)
    else:
        functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
    returners = salt.loader.returners(self.opts, functions, proxy=proxy)
    errors = {}
    if '_errors' in functions:
        errors = functions['_errors']
        functions.pop('_errors')
    # we're done, reset the limits!
    if modules_max_memory is True:
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
    executors = salt.loader.executors(self.opts, functions, proxy=proxy)
    return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
    '''
    Synchronously send *load* to the master over a request channel.

    When message signing is enabled the payload is signed with the
    minion's private key first (mutates *load* by adding 'sig').
    '''
    if self.opts['minion_sign_messages']:
        log.trace('Signing event to be published onto the bus.')
        privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
        load['sig'] = salt.crypt.sign_message(
            privkey_path, salt.serializers.msgpack.serialize(load))
    return salt.transport.Channel.factory(self.opts).send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
    '''
    Asynchronously send *load* to the master over a request channel.

    Signs the payload first when message signing is enabled (mutates
    *load* by adding 'sig'); yields the master's response.
    '''
    if self.opts['minion_sign_messages']:
        log.trace('Signing event to be published onto the bus.')
        privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
        load['sig'] = salt.crypt.sign_message(
            privkey_path, salt.serializers.msgpack.serialize(load))
    req_channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
    response = yield req_channel.send(load, timeout=timeout)
    raise tornado.gen.Return(response)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
    '''
    Fire an event on the master, or drop message if unable to send.

    :param data: event payload dict (used with *tag*)
    :param tag: event tag; with no *data* an empty payload is sent
    :param events: pre-built list of events (takes precedence over data/tag)
    :param pretag: namespace prefix forwarded in the load
    :param int timeout: request timeout in seconds
    :param bool sync: send synchronously (True) or fire-and-forget async
    :param timeout_handler: async-only callback for send failures
    :return: True on (attempted) send, False on sync failure, None when
        neither events nor tag was supplied
    '''
    load = {'id': self.opts['id'],
            'cmd': '_minion_event',
            'pretag': pretag,
            'tok': self.tok}
    # Priority: explicit events list, then data+tag, then bare tag.
    if events:
        load['events'] = events
    elif data and tag:
        load['data'] = data
        load['tag'] = tag
    elif not data and tag:
        load['data'] = {}
        load['tag'] = tag
    else:
        # Nothing to send.
        return
    if sync:
        try:
            self._send_req_sync(load, timeout)
        except salt.exceptions.SaltReqTimeoutError:
            log.info('fire_master failed: master could not be contacted. Request timed out.')
            return False
        except Exception:
            # Best-effort by design: log and report failure, never raise.
            log.info('fire_master failed: %s', traceback.format_exc())
            return False
    else:
        if timeout_handler is None:
            def handle_timeout(*_):
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                return True
            timeout_handler = handle_timeout
        # ExceptionStackContext routes async send errors to timeout_handler.
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Dispatches one decoded job payload: de-duplicates by JID, optionally
    throttles on process_count_max, then runs the job in a subprocess
    (default) or a thread via cls._target.

    :param dict data: decoded job data; must contain 'fun' and 'jid'
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info(
            'User %s Executing command %s with jid %s',
            data['user'], data['fun'], data['jid']
        )
    else:
        log.info(
            'Executing command %s with jid %s',
            data['fun'], data['jid']
        )
    log.debug('Command details %s', data)
    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            # Already seen this JID (e.g. from another master) — drop it.
            return
        else:
            self.jid_queue.append(data['jid'])
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                self.jid_queue.pop(0)
    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    # Throttle job spawning when process_count_max is configured.
    process_count_max = self.opts.get('process_count_max')
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            # FIX: use log.warning (log.warn is a deprecated alias) with
            # lazy %-args, consistent with the rest of this module.
            log.warning('Maximum number of processes reached while executing jid %s, waiting...', data['jid'])
            yield tornado.gen.sleep(10)
            process_count = len(salt.utils.minion.running(self.opts))
    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(
                target=self._target, args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )
    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()
    # TODO: remove the windows specific check?
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # we only want to join() immediately if we are daemonizing a process
        process.join()
    else:
        self.win_proc.append(process)
def ctx(self):
    '''
    Return a single context manager for the minion's data

    Bundles the functions/returners/executors context-dict clones into
    one manager (contextlib.nested on py2, an ExitStack on py3).
    '''
    managers = (
        self.functions.context_dict.clone(),
        self.returners.context_dict.clone(),
        self.executors.context_dict.clone(),
    )
    if six.PY2:
        return contextlib.nested(*managers)
    stack = contextlib.ExitStack()
    for manager in managers:
        stack.enter_context(manager)
    return stack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
    '''
    Entry point run in the job's subprocess/thread.

    Rebuilds a minion instance when one was not passed (Windows
    multiprocessing cannot pickle it), lazily fills in anything missing,
    then dispatches to _thread_return / _thread_multi_return.

    :param minion_instance: live Minion, or None to reconstruct from opts
    :param dict opts: minion configuration
    :param dict data: decoded job payload ('fun' may be a str or list)
    :param bool connected: whether the parent was connected to a master
    '''
    if not minion_instance:
        minion_instance = cls(opts)
        minion_instance.connected = connected
        # Freshly built instances lack loaded modules / serializer /
        # proc_dir; create them on demand.
        if not hasattr(minion_instance, 'functions'):
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors
        if not hasattr(minion_instance, 'serial'):
            minion_instance.serial = salt.payload.Serial(opts)
        if not hasattr(minion_instance, 'proc_dir'):
            uid = salt.utils.user.get_uid(user=opts.get('user', None))
            minion_instance.proc_dir = (
                get_proc_dir(opts['cachedir'], uid=uid)
            )
    with tornado.stack_context.StackContext(minion_instance.ctx):
        # A list/tuple of functions means a compound (multi-func) job.
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            Minion._thread_multi_return(minion_instance, opts, data)
        else:
            Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs a single function job end to end: writes the proc file, applies
    blackout rules, runs the function through its executor chain, then
    publishes the return to the master and any configured returners.
    '''
    # Proc file advertising this running job (read by saltutil.running).
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.process.daemonize_if(opts)
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()
    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    if function_name in minion_instance.functions:
        try:
            # Blackout: pillar takes precedence over grains; only
            # saltutil.refresh_pillar plus the whitelist may run.
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')
            func = minion_instance.functions[function_name]
            args, kwargs = load_args_and_kwargs(
                func,
                data['arg'],
                data)
            minion_instance.functions.pack['__context__']['retcode'] = 0
            # Executor chain: job data overrides opts; default 'direct_call'.
            executors = data.get('module_executors') or opts.get('module_executors', ['direct_call'])
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member
            # First executor to return non-None wins.
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break
            if isinstance(return_data, types.GeneratorType):
                # Stream generator output: fire a progress event per chunk,
                # accumulating into a dict (or list for non-dict chunks).
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data
            # Retcode may have been set by the function via __context__.
            ret['retcode'] = minion_instance.functions.pack['__context__'].get(
                'retcode',
                0
            )
            ret['success'] = True
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
        except TypeError as exc:
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
        except Exception:
            # Catch-all so the job always produces a return for the master.
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
    else:
        # Unknown function: suggest close matches via sys.doc, and surface
        # any module load errors that might explain the miss.
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = 254
        ret['out'] = 'nested'
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    # Add default returners from minion config
    # Should have been coverted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']
    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                # One broken returner must not block the others.
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Compound-job variant of _thread_return: data['fun'] is a list of
    function names run in order, with per-function return/retcode/success
    keyed either by index (multifunc_ordered) or by function name.
    '''
    # Proc file advertising this running job (read by saltutil.running).
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.process.daemonize_if(opts)
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()
    salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    multifunc_ordered = opts.get('multifunc_ordered', False)
    num_funcs = len(data['fun'])
    # Ordered mode keys results by position; otherwise by function name
    # (NOTE: name-keyed results collide if the same function appears twice).
    if multifunc_ordered:
        ret = {
            'return': [None] * num_funcs,
            'retcode': [None] * num_funcs,
            'success': [False] * num_funcs
        }
    else:
        ret = {
            'return': {},
            'retcode': {},
            'success': {}
        }
    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret['success'][data['fun'][ind]] = False
        try:
            # Blackout: pillar takes precedence over grains; only
            # saltutil.refresh_pillar plus the whitelist may run.
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')
            func = minion_instance.functions[data['fun'][ind]]
            args, kwargs = load_args_and_kwargs(
                func,
                data['arg'][ind],
                data)
            minion_instance.functions.pack['__context__']['retcode'] = 0
            if multifunc_ordered:
                ret['return'][ind] = func(*args, **kwargs)
                ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'][ind] = True
            else:
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'][data['fun'][ind]] = True
        except Exception as exc:
            # One failed function must not abort the rest of the compound
            # job; record the traceback as that function's return.
            trb = traceback.format_exc()
            log.warning('The minion function caused an exception: %s', exc)
            if multifunc_ordered:
                ret['return'][ind] = trb
            else:
                ret['return'][data['fun'][ind]] = trb
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'metadata' in data:
        ret['metadata'] = data['metadata']
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    if data['ret']:
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        for returner in set(data['ret'].split(',')):
            ret['id'] = opts['id']
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                # One broken returner must not block the others.
                log.error(
                    'The return failed for job %s: %s',
                    data['jid'], exc
                )
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
    '''
    Return the data from the executed command to the master server

    :param dict ret: job return data (expects 'jid'/'fun' or the
        double-underscore syndic variants)
    :param str ret_cmd: master-side command ('_return' or '_syndic_return')
    :param int timeout: request timeout in seconds
    :param bool sync: send synchronously or fire-and-forget async
    :return: the master's reply (sync), an async future, or '' on
        timeout / when pub_ret is disabled
    '''
    jid = ret.get('jid', ret.get('__jid__'))
    fun = ret.get('fun', ret.get('__fun__'))
    if self.opts['multiprocessing']:
        # Job finished: remove the proc file advertising it as running.
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                # The file is gone already
                pass
    log.info('Returning information for job: %s', jid)
    log.trace('Return data: %s', ret)
    if ret_cmd == '_syndic_return':
        # Syndic returns re-wrap the payload; double-underscore keys are
        # transport metadata and stay out of load['return'].
        load = {'cmd': ret_cmd,
                'id': self.opts['uid'],
                'jid': jid,
                'fun': fun,
                'arg': ret.get('arg'),
                'tgt': ret.get('tgt'),
                'tgt_type': ret.get('tgt_type'),
                'load': ret.get('__load__')}
        # NOTE(review): this branch sends opts['uid'] as 'id' while
        # _return_pub_multi sends opts['id'] — confirm which is intended.
        if '__master_id__' in ret:
            load['master_id'] = ret['__master_id__']
        load['return'] = {}
        for key, value in six.iteritems(ret):
            if key.startswith('__'):
                continue
            load['return'][key] = value
    else:
        load = {'cmd': ret_cmd,
                'id': self.opts['id']}
        for key, value in six.iteritems(ret):
            load[key] = value
    # Pick the outputter: explicit 'out' in the return wins, otherwise the
    # executed function's __outputter__ attribute.
    if 'out' in ret:
        if isinstance(ret['out'], six.string_types):
            load['out'] = ret['out']
        else:
            log.error(
                'Invalid outputter %s. This is likely a bug.',
                ret['out']
            )
    else:
        try:
            oput = self.functions[fun].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, six.string_types):
                load['out'] = oput
    if self.opts['cache_jobs']:
        # Local job cache has been enabled
        if ret['jid'] == 'req':
            # Locally-initiated jobs ('req') need a real JID for the cache.
            ret['jid'] = salt.utils.jid.gen_jid(self.opts)
        salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
    if not self.opts['pub_ret']:
        return ''

    def timeout_handler(*_):
        log.warning(
            'The minion failed to return the job information for job %s. '
            'This is often due to the master being shut down or '
            'overloaded. If the master is running, consider increasing '
            'the worker_threads value.', jid
        )
        return True

    if sync:
        try:
            ret_val = self._send_req_sync(load, timeout=timeout)
        except SaltReqTimeoutError:
            timeout_handler()
            return ''
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
    return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
    '''
    Return the data from the executed command to the master server

    Batch variant of _return_pub: groups the given returns by JID and
    sends them to the master in a single request.

    :param rets: a single return dict or a list of them
    :param str ret_cmd: master-side command ('_return' or '_syndic_return')
    :param int timeout: request timeout in seconds
    :param bool sync: send synchronously or fire-and-forget async
    :return: the master's reply (sync), an async future, or '' on timeout
    '''
    if not isinstance(rets, list):
        rets = [rets]
    # One merged load per JID; entries for the same JID accumulate.
    jids = {}
    for ret in rets:
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Job finished: remove the proc file advertising it as running.
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        load = jids.setdefault(jid, {})
        if ret_cmd == '_syndic_return':
            if not load:
                load.update({'id': self.opts['id'],
                             'jid': jid,
                             'fun': fun,
                             'arg': ret.get('arg'),
                             'tgt': ret.get('tgt'),
                             'tgt_type': ret.get('tgt_type'),
                             'load': ret.get('__load__'),
                             'return': {}})
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            # Double-underscore keys are transport metadata; keep them out
            # of the per-job 'return' payload.
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load.update({'id': self.opts['id']})
            for key, value in six.iteritems(ret):
                load[key] = value
        # Pick the outputter: explicit 'out' wins, otherwise the executed
        # function's __outputter__ attribute.
        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
    # Wrap all per-JID loads into a single batched request.
    load = {'cmd': ret_cmd,
            'load': list(six.itervalues(jids))}

    def timeout_handler(*_):
        # NOTE(review): 'jid' here is whatever the loop above left bound
        # last — the warning names only one of the batched jobs.
        log.warning(
            'The minion failed to return the job information for job %s. '
            'This is often due to the master being shut down or '
            'overloaded. If the master is running, consider increasing '
            'the worker_threads value.', jid
        )
        return True

    if sync:
        try:
            ret_val = self._send_req_sync(load, timeout=timeout)
        except SaltReqTimeoutError:
            timeout_handler()
            return ''
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
    return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
    '''
    Notify the master that this minion has come up, once on the plain
    'minion_start' tag and once on the namespaced start tag.
    '''
    self._fire_master(
        'Minion {0} started at {1}'.format(self.opts['id'], time.asctime()),
        'minion_start')
    # dup name spaced event
    self._fire_master(
        'Minion {0} started at {1}'.format(self.opts['id'], time.asctime()),
        tagify([self.opts['id'], 'start'], 'minion'))
def module_refresh(self, force_refresh=False, notify=False):
    '''
    Refresh the functions and returners.

    :param bool force_refresh: force the loader to rebuild its module cache
    :param bool notify: forwarded to the loader so it can emit a
        refresh-complete event
    '''
    log.debug('Refreshing modules. Notify=%s', notify)
    loaded = self._load_modules(force_refresh, notify=notify)
    self.functions, self.returners, _, self.executors = loaded
    # Keep the scheduler pointed at the freshly loaded callables.
    self.schedule.functions = self.functions
    self.schedule.returners = self.returners
def beacons_refresh(self):
    '''
    Rebuild the beacon manager from the current opts and loaded functions.
    '''
    log.debug('Refreshing beacons.')
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
    '''
    Refresh the pillar

    :param bool force_refresh: forwarded to module_refresh() after the
        pillar has been recompiled
    '''
    if self.connected:
        log.debug('Refreshing pillar')
        try:
            self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()
        except SaltClientError:
            # Do not exit if a pillar refresh fails.
            log.error('Pillar data could not be refreshed. '
                      'One or more masters may be down!')
    # Modules are refreshed even when disconnected or when the pillar
    # compile failed above.
    self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
    '''
    Apply a schedule management operation requested via a minion event.

    data['func'] selects the scheduler operation; remaining keys supply
    its arguments. Unknown operations are silently ignored.
    '''
    func = data.get('func', None)
    name = data.get('name', None)
    schedule = data.get('schedule', None)
    where = data.get('where', None)
    persist = data.get('persist', None)
    # Map the requested operation onto the scheduler API. Lambdas keep
    # the calls lazy so only the selected operation runs.
    actions = {
        'delete': lambda: self.schedule.delete_job(name, persist),
        'add': lambda: self.schedule.add_job(schedule, persist),
        'modify': lambda: self.schedule.modify_job(name, schedule, persist),
        'enable': lambda: self.schedule.enable_schedule(),
        'disable': lambda: self.schedule.disable_schedule(),
        'enable_job': lambda: self.schedule.enable_job(name, persist),
        'run_job': lambda: self.schedule.run_job(name),
        'disable_job': lambda: self.schedule.disable_job(name, persist),
        'postpone_job': lambda: self.schedule.postpone_job(name, data),
        'skip_job': lambda: self.schedule.skip_job(name, data),
        'reload': lambda: self.schedule.reload(schedule),
        'list': lambda: self.schedule.list(where),
        'save_schedule': lambda: self.schedule.save_schedule(),
        'get_next_fire_time': lambda: self.schedule.get_next_fire_time(name),
    }
    action = actions.get(func)
    if action is not None:
        action()
def manage_beacons(self, tag, data):
    '''
    Manage Beacons

    data['func'] selects the beacon operation; remaining keys supply its
    arguments. Unknown operations are silently ignored.
    '''
    func = data.get('func', None)
    name = data.get('name', None)
    beacon_data = data.get('beacon_data', None)
    include_pillar = data.get('include_pillar', None)
    include_opts = data.get('include_opts', None)
    # Lazy dispatch table — only the selected operation is executed.
    actions = {
        'add': lambda: self.beacons.add_beacon(name, beacon_data),
        'modify': lambda: self.beacons.modify_beacon(name, beacon_data),
        'delete': lambda: self.beacons.delete_beacon(name),
        'enable': lambda: self.beacons.enable_beacons(),
        'disable': lambda: self.beacons.disable_beacons(),
        'enable_beacon': lambda: self.beacons.enable_beacon(name),
        'disable_beacon': lambda: self.beacons.disable_beacon(name),
        'list': lambda: self.beacons.list_beacons(include_opts, include_pillar),
        'list_available': lambda: self.beacons.list_available_beacons(),
        'validate_beacon': lambda: self.beacons.validate_beacon(name, beacon_data),
        'reset': lambda: self.beacons.reset(),
    }
    action = actions.get(func)
    if action is not None:
        action()
def environ_setenv(self, tag, data):
    '''
    Set the salt-minion main process environment according to
    the data contained in the minion event data
    '''
    environ = data.get('environ', None)
    if environ is None:
        return False
    # Imported lazily so the module is only loaded when actually needed.
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(
        environ,
        data.get('false_unsets', False),
        data.get('clear_all', False),
    )
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running
    '''
    # Guard clauses: refuse to start a stopped or already-running instance.
    if self._running is False:
        log.error(
            'This %s was scheduled to stop. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    if self._running is True:
        log.error(
            'This %s is already running. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    if self._running is None:
        self._running = True

    try:
        log.info(
            '%s is starting as user \'%s\'',
            self.__class__.__name__, salt.utils.user.get_user()
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        level = logging.DEBUG if salt.utils.platform.is_windows() else logging.ERROR
        log.log(
            level,
            'Failed to get the user who is starting %s',
            self.__class__.__name__,
            exc_info=err
        )
def _mine_send(self, tag, data):
    '''
    Send mine data to the master

    Returns the channel's reply, or None when the request times out.
    '''
    channel = salt.transport.Channel.factory(self.opts)
    data['tok'] = self.tok
    try:
        return channel.send(data)
    except SaltReqTimeoutError:
        log.warning('Unable to send mine data to master.')
        return None
@tornado.gen.coroutine
def handle_event(self, package):
    '''
    Handle an event from the epull_sock (all local minion events)

    Dispatches on the event tag prefix: module/pillar/beacon/schedule
    refreshes, mine sends, master connection state changes, scheduler
    returns, error forwarding and auth credential updates.
    '''
    if not self.ready:
        raise tornado.gen.Return()
    tag, data = salt.utils.event.SaltEvent.unpack(package)
    log.debug(
        'Minion of \'%s\' is handling event tag \'%s\'',
        self.opts['master'], tag
    )
    if tag.startswith('module_refresh'):
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )
    elif tag.startswith('pillar_refresh'):
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False)
        )
    elif tag.startswith('beacons_refresh'):
        self.beacons_refresh()
    elif tag.startswith('manage_schedule'):
        self.manage_schedule(tag, data)
    elif tag.startswith('manage_beacons'):
        self.manage_beacons(tag, data)
    elif tag.startswith('grains_refresh'):
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            # NOTE(review): pillar_refresh is a coroutine but is not
            # yielded here, so its completion is not awaited — confirm
            # this is intentional.
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']
    elif tag.startswith('environ_setenv'):
        self.environ_setenv(tag, data)
    elif tag.startswith('_minion_mine'):
        self._mine_send(tag, data)
    elif tag.startswith('fire_master'):
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
    elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            # NOTE(review): the message text says 'mine failback' and
            # formats self.opts['master'] — wording looks off; confirm.
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive'))

                log.info('Trying to tune in to next master from master-list')

                # Drop the existing pub channel before re-evaluating masters.
                if hasattr(self, 'pub_channel'):
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )
                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # No master could be reached: stop the loop so the
                    # process can restart and retry from scratch.
                    self.restart = True
                    self.io_loop.stop()
    elif tag.startswith(master_event(type='connected')):
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
                self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                         schedule=schedule)
    elif tag.startswith('__schedule_return'):
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)
    elif tag.startswith('_salt_error'):
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)
    elif tag.startswith('salt/auth/creds'):
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
    '''
    Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
    '''
    # Add an extra fallback in case a forked process leaks through
    multiprocessing.active_children()

    # Thread cleanup below only applies to Windows process tracking.
    if not salt.utils.platform.is_windows():
        return
    for thread in self.win_proc:
        if thread.is_alive():
            continue
        thread.join()
        try:
            self.win_proc.remove(thread)
            del thread
        except (ValueError, NameError):
            pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
    '''
    Set up the beacons.
    This is safe to call multiple times; already-registered periodic
    callbacks are left untouched.
    '''
    self._setup_core()
    interval_ms = self.opts['loop_interval'] * 1000
    pending = {}
    if 'beacons' not in self.periodic_callbacks:
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

        def _run_beacons():
            # Evaluate configured beacons and forward any fired events.
            fired = None
            try:
                fired = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
            if fired and self.connected:
                self._fire_master(events=fired)

        pending['beacons'] = tornado.ioloop.PeriodicCallback(
            _run_beacons, interval_ms)
        if before_connect:
            # Make sure there is a chance for one iteration to occur before connect
            _run_beacons()
    if 'cleanup' not in self.periodic_callbacks:
        pending['cleanup'] = tornado.ioloop.PeriodicCallback(
            self._fallback_cleanups, interval_ms)
    # start all the freshly created callbacks
    for callback in six.itervalues(pending):
        callback.start()
    self.periodic_callbacks.update(pending)
def setup_scheduler(self, before_connect=False):
    '''
    Set up the scheduler.
    This is safe to call multiple times.

    :param bool before_connect: when True, run one scheduler iteration
        immediately so scheduled jobs get a chance to fire before the
        master connection is established.
    '''
    self._setup_core()

    loop_interval = self.opts['loop_interval']
    new_periodic_callbacks = {}

    if 'schedule' not in self.periodic_callbacks:
        if 'schedule' not in self.opts:
            self.opts['schedule'] = {}
        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                utils=self.utils,
                cleanup=[master_event(type='alive')])

        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                # Use lazy %-style logging args (file convention) instead of
                # eager str.format, and collapse the duplicated
                # minute/minutes branches into one call.
                log.debug(
                    'Enabling the grains refresher. Will run every %s %s.',
                    self.opts['grains_refresh_every'],
                    'minutes' if self.opts['grains_refresh_every'] > 1 else 'minute'
                )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            log.error(
                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: %s',
                exc
            )

        # TODO: actually listen to the return and change period
        def handle_schedule():
            self.process_schedule(self, loop_interval)
        new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)

        if before_connect:
            # Make sure there is a chance for one iteration to occur before connect
            handle_schedule()

    if 'cleanup' not in self.periodic_callbacks:
        new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
            self._fallback_cleanups, loop_interval * 1000)

    # start all the other callbacks
    for periodic_cb in six.itervalues(new_periodic_callbacks):
        periodic_cb.start()
    self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
    '''
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None

    :param bool start: when True, connect to the master and start the
        IO loop; when False, only wire up handlers/callbacks (the caller
        owns the loop).
    '''
    self._pre_tune()

    log.debug('Minion \'%s\' trying to tune in', self.opts['id'])

    if start:
        # Optionally start beacons/scheduler before the master connection.
        if self.opts.get('beacons_before_connect', False):
            self.setup_beacons(before_connect=True)
        if self.opts.get('scheduler_before_connect', False):
            self.setup_scheduler(before_connect=True)
        self.sync_connect_master()
    if self.connected:
        self._fire_master_minion_start()
        log.info('Minion is ready to receive requests!')

    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()

    # Make sure to gracefully handle CTRL_LOGOFF_EVENT
    if HAS_WIN_FUNCTIONS:
        salt.utils.win_functions.enable_ctrl_logoff_handler()

    # On first startup execute a state run if configured to do so
    self._state_run()

    self.setup_beacons()
    self.setup_scheduler()

    # schedule the stuff that runs every interval
    ping_interval = self.opts.get('ping_interval', 0) * 60
    if ping_interval > 0 and self.connected:
        def ping_master():
            try:
                def ping_timeout_handler(*_):
                    # No answer from the master: optionally restart the
                    # minion service unless auth_safemode forbids it.
                    if not self.opts.get('auth_safemode', True):
                        log.error('** Master Ping failed. Attempting to restart minion**')
                        delay = self.opts.get('random_reauth_delay', 5)
                        log.info('delaying random_reauth_delay %ss', delay)
                        try:
                            self.functions['service.restart'](service_name())
                        except KeyError:
                            # Probably no init system (running in docker?)
                            log.warning(
                                'ping_interval reached without response '
                                'from the master, but service.restart '
                                'could not be run to restart the minion '
                                'daemon. ping_interval requires that the '
                                'minion is running under an init system.'
                            )

                self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
            except Exception:
                log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
        self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
        self.periodic_callbacks['ping'].start()

    # add handler to subscriber
    if hasattr(self, 'pub_channel') and self.pub_channel is not None:
        self.pub_channel.on_recv(self._handle_payload)
    elif self.opts.get('master_type') != 'disable':
        log.error('No connection to master found. Scheduled jobs will not run.')

    if start:
        try:
            self.io_loop.start()
            if self.restart:
                self.destroy()
        except (KeyboardInterrupt, RuntimeError):  # A RuntimeError can be re-raised by Tornado on shutdown
            self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when minion sees something it shouldnt
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
    '''
    Tear down the minion
    '''
    self._running = False
    if hasattr(self, 'schedule'):
        del self.schedule
    # Detach and close the pub channel if one was ever established.
    if getattr(self, 'pub_channel', None) is not None:
        self.pub_channel.on_recv(None)
        if hasattr(self.pub_channel, 'close'):
            self.pub_channel.close()
        del self.pub_channel
    if hasattr(self, 'periodic_callbacks'):
        for periodic_cb in six.itervalues(self.periodic_callbacks):
            periodic_cb.stop()
def __del__(self):
    # Best-effort teardown when the object is garbage collected.
    self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        # interface/flag recorded before Minion.__init__ mutates opts
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        # jids already forwarded upstream (avoid duplicate load lookups)
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
        #   Decrement the timeout so lower-level forwards finish first.
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        def timeout_handler(*args):
            log.warning('Unable to forward pub data: %s', args[1])
            return True

        # Publish asynchronously; timeouts are swallowed by the stack
        # context handler above rather than propagating.
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data['tgt'],
                                 data['fun'],
                                 data['arg'],
                                 data['tgt_type'],
                                 data['ret'],
                                 data['jid'],
                                 data['to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)

    def fire_master_syndic_start(self):
        # Send an event to the master that the minion is live
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start',
            sync=False,
        )
        # Duplicate on the namespaced start tag.
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
            sync=False,
        )

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        if payload is not None and payload['enc'] == 'aes':
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.

    @tornado.gen.coroutine
    def reconnect(self):
        # Drop the old channel before signing in again.
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel

        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)

        if self.connected:
            self.opts['master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info('Minion is ready to receive requests!')

        raise tornado.gen.Return(self)

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local

        if hasattr(self, 'forward_events'):
            self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`:
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
    opts['loop_interval'] = 1
    super(SyndicManager, self).__init__(opts)
    self.mminion = salt.minion.MasterMinion(opts)
    # sync (old behavior), cluster (only returns and publishes)
    self.syndic_mode = self.opts.get('syndic_mode', 'sync')
    self.syndic_failover = self.opts.get('syndic_failover', 'random')

    self.auth_wait = self.opts['acceptance_wait_time']
    self.max_auth_wait = self.opts['acceptance_wait_time_max']

    self._has_master = threading.Event()
    self.jid_forward_cache = set()

    if io_loop is not None:
        self.io_loop = io_loop
    else:
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()

    # List of events
    self.raw_events = []
    # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
    self.job_rets = {}
    # List of delayed job_rets which was unable to send for some reason and will be resend to
    # any available master
    self.delayed = []
    # Active pub futures: {master_id: (future, [job_ret, ...]), ...}
    self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
    '''
    Create a syndic, and asynchronously connect it to a master

    Retries forever with a growing back-off (capped by
    acceptance_wait_time_max); returns the connected Syndic via
    tornado.gen.Return.
    '''
    auth_wait = opts['acceptance_wait_time']
    failed = False
    while True:
        log.debug(
            'Syndic attempting to connect to %s',
            opts['master']
        )
        try:
            syndic = Syndic(opts,
                            timeout=self.SYNDIC_CONNECT_TIMEOUT,
                            safe=False,
                            io_loop=self.io_loop,
                            )
            yield syndic.connect_master(failed=failed)
            # set up the syndic to handle publishes (specifically not event forwarding)
            syndic.tune_in_no_block()

            # Send an event to the master that the minion is live
            syndic.fire_master_syndic_start()

            log.info(
                'Syndic successfully connected to %s',
                opts['master']
            )
            break
        except SaltClientError:
            # Dead local variables ('last', 'exc') removed: neither was
            # ever read after assignment.
            failed = True
            log.error(
                'Error while bringing up syndic for multi-syndic. Is the '
                'master at %s responding?', opts['master']
            )
            # Back off a little more each failure, up to the cap.
            if auth_wait < self.max_auth_wait:
                auth_wait += self.auth_wait
            yield tornado.gen.sleep(auth_wait)  # TODO: log?
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            failed = True
            log.critical(
                'Unexpected error while connecting to %s',
                opts['master'], exc_info=True
            )

    raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
    '''
    Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for

    Returns True when the returns were handed to some master's syndic,
    False when no master could take them (caller should retry later).
    '''
    func = '_return_pub_multi'
    for master, syndic_future in self.iter_master_options(master_id):
        if not syndic_future.done() or syndic_future.exception():
            log.error(
                'Unable to call %s on %s, that syndic is not connected',
                func, master
            )
            continue

        # Check the outcome of the previous send to this master.
        future, data = self.pub_futures.get(master, (None, None))
        if future is not None:
            if not future.done():
                if master == master_id:
                    # Targeted master previous send not done yet, call again later
                    return False
                else:
                    # Fallback master is busy, try the next one
                    continue
            elif future.exception():
                # Previous execution on this master returned an error
                log.error(
                    'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
                del self.pub_futures[master]
                # Add not sent data to the delayed list and try the next master
                self.delayed.extend(data)
                continue
        future = getattr(syndic_future.result(), func)(values,
                                                       '_syndic_return',
                                                       timeout=self._return_retry_timer(),
                                                       sync=False)
        # Remember the in-flight future with its payload so a later
        # failure can re-queue the data.
        self.pub_futures[master] = (future, values)
        return True
    # Loop done and didn't exit: wasn't sent, try again later
    return False
def iter_master_options(self, master_id=None):
    '''
    Yield (master_id, syndic_future) pairs covering every known master,
    starting with the requested one when it is known. Order is shuffled
    first when syndic_failover is 'random'.
    '''
    candidates = list(self._syndics.keys())
    if self.opts['syndic_failover'] == 'random':
        shuffle(candidates)
    if master_id not in self._syndics:
        master_id = candidates.pop(0)
    else:
        candidates.remove(master_id)
    yield master_id, self._syndics[master_id]
    for master_id in candidates:
        yield master_id, self._syndics[master_id]
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the syndic
    '''
    self._spawn_syndics()
    # Instantiate the local client
    self.local = salt.client.get_local_client(
        self.opts['_minion_conf_file'], io_loop=self.io_loop)
    self.local.event.subscribe('')

    log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])

    # register the event sub to the poller
    # (_reset_event_aggregation initializes job_rets/raw_events; the
    # previous direct assignments here were redundant and were removed)
    self._reset_event_aggregation()
    future = self.local.event.set_event_handler(self._process_event)
    self.io_loop.add_future(future, self.reconnect_event_bus)

    # forward events every syndic_event_forward_timeout
    self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                          self.opts['syndic_event_forward_timeout'] * 1000,
                                                          )
    self.forward_events.start()

    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()

    self.io_loop.start()
def _process_event(self, raw):
    # TODO: cleanup: Move down into event class
    '''
    Event-bus handler: aggregate job returns into self.job_rets (flushed
    later by _forward_events) and buffer other events into self.raw_events.
    '''
    mtag, data = self.local.event.unpack(raw, self.local.event.serial)
    log.trace('Got event %s', mtag)  # pylint: disable=no-member

    # Job return events have tags shaped like: <prefix>/job/<jid>/ret[/...]
    tag_parts = mtag.split('/')
    if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
            salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
            'return' in data:
        if 'jid' not in data:
            # Not a job return
            return
        if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
            log.debug('Return received with matching master_id, not forwarding')
            return

        # Aggregate returns per originating master and per tag
        master = data.get('master_id')
        jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
        if not jdict:
            # First return seen for this tag: record job metadata and load
            jdict['__fun__'] = data.get('fun')
            jdict['__jid__'] = data['jid']
            jdict['__load__'] = {}
            fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
            # Only need to forward each load once. Don't hit the disk
            # for every minion return!
            if data['jid'] not in self.jid_forward_cache:
                jdict['__load__'].update(
                    self.mminion.returners[fstr](data['jid'])
                )
                self.jid_forward_cache.add(data['jid'])
                if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                    # Pop the oldest jid from the cache
                    tmp = sorted(list(self.jid_forward_cache))
                    tmp.pop(0)
                    self.jid_forward_cache = set(tmp)
        if master is not None:
            # __'s to make sure it doesn't print out on the master cli
            jdict['__master_id__'] = master
        ret = {}
        for key in 'return', 'retcode', 'success':
            if key in data:
                ret[key] = data[key]
        jdict[data['id']] = ret
    else:
        # TODO: config to forward these? If so we'll have to keep track of who
        # has seen them
        # if we are the top level masters-- don't forward all the minion events
        if self.syndic_mode == 'sync':
            # Add generic event aggregation here
            if 'retcode' not in data:
                self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
    '''
    Periodic callback: flush buffered raw events and aggregated job
    returns up to the master(s), retrying previously undelivered returns.
    '''
    log.trace('Forwarding events')  # pylint: disable=no-member
    # Fire any buffered raw events up to the master(s)
    if self.raw_events:
        events = self.raw_events
        self.raw_events = []
        self._call_syndic('_fire_master',
                          kwargs={'events': events,
                                  'pretag': tagify(self.opts['id'], base='syndic'),
                                  'timeout': self._return_retry_timer(),
                                  'sync': False,
                                  },
                          )
    # Retry returns that could not be delivered on a previous pass
    if self.delayed:
        res = self._return_pub_syndic(self.delayed)
        if res:
            self.delayed = []
    # Forward aggregated job returns, preferring the master they came from;
    # only drop a master's bucket once its returns were actually handed off
    for master in list(six.iterkeys(self.job_rets)):
        values = list(six.itervalues(self.job_rets[master]))
        res = self._return_pub_syndic(values, master_id=master)
        if res:
            del self.job_rets[master]
class Matcher(object):
    '''
    Use to return the value for matching calls from the master

    Each ``*_match`` method takes a target expression and returns a bool
    indicating whether this minion (described by ``opts``) matches it.
    '''
    def __init__(self, opts, functions=None):
        # functions is lazily populated by data_match() when needed
        self.opts = opts
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion

        :param match: the target expression from the top file
        :param data: list of top-file entries; a dict with a 'match' key
            selects the matcher type (defaults to 'compound')
        :param nodegroups: nodegroup definitions, only used by the
            nodegroup matcher
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: %s', matcher)
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, six.string_types):
            return False

        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, six.string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        # Lazily load the minion modules so data.getval is available
        if self.functions is None:
            utils = salt.loader.utils(self.opts)
            self.functions = salt.loader.minion_mods(self.opts, utils=utils)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar pcre match
        '''
        log.debug('pillar PCRE target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar PCRE match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing, no PCRE
        '''
        log.debug('pillar target: %s', tgt)
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.data.subdict_match(self.opts['pillar'],
                                             tgt,
                                             delimiter=delimiter,
                                             exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on IP address or CIDR notation
        '''
        try:
            # Target is an address?
            tgt = ipaddress.ip_address(tgt)
        except Exception:
            try:
                # Target is a network?
                tgt = ipaddress.ip_network(tgt)
            except Exception:
                log.error('Invalid IP/CIDR target: %s', tgt)
                # FIX: previously returned [] here; return False so every
                # failure path of this matcher yields a bool (still falsy,
                # so callers relying on truthiness are unaffected).
                return False
        proto = 'ipv{0}'.format(tgt.version)

        grains = self.opts['grains']

        if proto not in grains:
            match = False
        elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
            match = six.text_type(tgt) in grains[proto]
        else:
            match = salt.utils.network.in_subnet(tgt, grains[proto])

        return match

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: %s', exc)
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
            log.error('Compound target received that is neither string, list nor tuple')
            return False
        log.debug('compound_match: %s ? %s', self.opts['id'], tgt)
        # Letter prefix -> matcher engine name
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'J': 'pillar_pcre',
               'L': 'list',
               'N': None,      # Nodegroups should already be expanded
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'

        results = []
        opers = ['and', 'or', 'not', '(', ')']

        if isinstance(tgt, six.string_types):
            words = tgt.split()
        else:
            words = tgt

        for word in words:
            target_info = salt.utils.minions.parse_target(word)

            # Easy check first
            if word in opers:
                if results:
                    if results[-1] == '(' and word in ('and', 'or'):
                        log.error('Invalid beginning operator after "(": %s', word)
                        return False
                    if word == 'not':
                        if not results[-1] in ('and', 'or', '('):
                            results.append('and')
                    results.append(word)
                else:
                    # seq start with binary oper, fail
                    if word not in ['(', 'not']:
                        log.error('Invalid beginning operator: %s', word)
                        return False
                    results.append(word)

            elif target_info and target_info['engine']:
                if 'N' == target_info['engine']:
                    # Nodegroups should already be expanded/resolved to other engines
                    log.error(
                        'Detected nodegroup expansion failure of "%s"', word)
                    return False
                engine = ref.get(target_info['engine'])
                if not engine:
                    # If an unknown engine is called at any time, fail out
                    log.error(
                        'Unrecognized target engine "%s" for target '
                        'expression "%s"', target_info['engine'], word
                    )
                    return False

                engine_args = [target_info['pattern']]
                engine_kwargs = {}
                if target_info['delimiter']:
                    engine_kwargs['delimiter'] = target_info['delimiter']

                results.append(
                    six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
                )

            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(six.text_type(self.glob_match(word)))

        results = ' '.join(results)
        log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
        try:
            # NOTE(security): eval of a master-supplied expression; by this
            # point `results` only contains 'True'/'False' tokens and the
            # validated operators/parens above, so the input is constrained.
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error(
                'Invalid compound target: %s for results: %s', tgt, results)
            return False
        # FIX: removed an unreachable trailing `return False` (both the try
        # and except branches above already return).

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinionManager(MinionManager):
    '''
    Create the multi-minion interface but for proxy minions
    '''
    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Helper function to return the correct type of object

        Identical to the parent factory except that it constructs a
        ProxyMinion rather than a regular Minion.
        '''
        return ProxyMinion(
            opts,
            timeout,
            safe,
            io_loop=io_loop,
            loaded_base_name=loaded_base_name,
            jid_queue=jid_queue,
        )
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check Minion._post_master_init
        to see if those changes need to be propagated.

        ProxyMinions need a significantly different post master setup,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        log.debug("subclassed _post_master_init")

        if self.connected:
            self.opts['master'] = master

            # Compile pillar now that we know which master we connected to
            self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                saltenv=self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()

        # A proxy minion is useless without its 'proxy' configuration block
        if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
            errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
                'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)

        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']

        if self.opts.get('proxy_merge_pillar_in_opts'):
            # Override proxy opts with pillar data when the user required.
            self.opts = salt.utils.dictupdate.merge(self.opts,
                                                    self.opts['pillar'],
                                                    strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                    merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
        elif self.opts.get('proxy_mines_pillar'):
            # Even when not required, some details such as mine configuration
            # should be merged anyway whenever possible.
            if 'mine_interval' in self.opts['pillar']:
                self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
            if 'mine_functions' in self.opts['pillar']:
                general_proxy_mines = self.opts.get('mine_functions', [])
                specific_proxy_mines = self.opts['pillar']['mine_functions']
                try:
                    self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
                except TypeError as terr:
                    # Incompatible container types (e.g. list + dict); keep
                    # the pre-existing mine_functions untouched
                    log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
                        self.opts['id']))

        fq_proxyname = self.opts['proxy']['proxytype']

        # Need to load the modules so they get all the dunder variables
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()

        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])

        # Pull in the utils
        self.utils = salt.loader.utils(self.opts)

        # Then load the proxy module
        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

        # And re-load the modules so the __proxy__ variable gets injected
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
        self.proxy.pack['__pillar__'] = self.opts['pillar']

        # Reload utils as well (chicken and egg, __utils__ needs __proxy__
        # and __proxy__ needs __utils__)
        self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack['__utils__'] = self.utils

        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()

        # Start engines here instead of in the Minion superclass __init__
        # This is because we need to inject the __proxy__ variable but
        # it is not setup until now.
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager, proxy=self.proxy)

        if ('{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
            errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
                'Check your proxymodule. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)

        # Initialize the connection to the managed device
        proxy_init_fn = self.proxy[fq_proxyname + '.init']
        proxy_init_fn(self.opts)

        # Grains can only be collected after the proxy module is up
        self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)

        if self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')],
                proxy=self.proxy)

        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'run_on_start': True,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)

        # add master_alive job if enabled
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            # Failback ping job only makes sense when not already on the
            # primary master of a master_list
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
                    master_event(type='failback'):
                    {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)

        # proxy keepalive
        proxy_alive_fn = fq_proxyname+'.alive'
        if (proxy_alive_fn in self.proxy
            and 'status.proxy_reconnect' in self.functions
                and self.opts.get('proxy_keep_alive', True)):
            # if `proxy_keep_alive` is not specified or is set to False,
            # do not schedule the reconnect check
            self.schedule.add_job({
                '__proxy_keepalive':
                {
                    'function': 'status.proxy_reconnect',
                    'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {
                        'proxy_name': fq_proxyname
                    }
                }
            }, persist=True)
            self.schedule.enable_schedule()
        else:
            self.schedule.delete_job('__proxy_keepalive', persist=True)

        # Sync the grains here so the proxy can communicate them to the master
        self.functions['saltutil.sync_grains'](saltenv='base')
        self.grains_cache = self.opts['grains']
        self.ready = True

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        # Entry point for a job executed in a separate process/thread: lazily
        # build a minion instance (including the proxy module stack) if one
        # was not handed in, then dispatch the job payload.
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                # Need to load the modules so they get all the dunder variables
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors

                # Pull in the utils
                minion_instance.utils = salt.loader.utils(minion_instance.opts)

                # Then load the proxy module
                minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)

                # And re-load the modules so the __proxy__ variable gets injected
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors

                minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
                minion_instance.proxy.pack['__salt__'] = minion_instance.functions
                minion_instance.proxy.pack['__ret__'] = minion_instance.returners
                minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']

                # Reload utils as well (chicken and egg, __utils__ needs
                # __proxy__ and __proxy__ needs __utils__)
                minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
                minion_instance.proxy.pack['__utils__'] = minion_instance.utils

                # Reload all modules so all dunder variables are injected
                minion_instance.proxy.reload_modules()

                fq_proxyname = opts['proxy']['proxytype']

                proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
                proxy_init_fn(opts)
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )

        with tornado.stack_context.StackContext(minion_instance.ctx):
            # Multi-function jobs carry a tuple/list of function names
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                Minion._thread_multi_return(minion_instance, opts, data)
            else:
                Minion._thread_return(minion_instance, opts, data)
|
tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in an different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
    """
    Raised when a requested Python virtualenv is missing or unusable.
    """

    def __init__(self, message):
        # Pass the message to the Exception base class so str(exc) and
        # tracebacks show it (previously the base class was never
        # initialized and str(exc) was an empty string).
        super().__init__(message)
        self.message = message
def dispatch_startup():
    """
    One-time initialization when the task dispatcher starts: rebuild
    schedule computed fields, (re)register this instance via cluster policy
    application, emit a heartbeat, reset metrics, and refresh rsyslog config.
    """
    startup_logger = logging.getLogger('awx.main.tasks')
    startup_logger.debug("Syncing Schedules")
    for sch in Schedule.objects.all():
        try:
            sch.update_computed_fields()
        except Exception:
            logger.exception("Failed to rebuild schedule {}.".format(sch))

    #
    # When the dispatcher starts, if the instance cannot be found in the database,
    # automatically register it.  This is mostly useful for openshift-based
    # deployments where:
    #
    # 2 Instances come online
    # Instance B encounters a network blip, Instance A notices, and
    # deprovisions it
    # Instance B's connectivity is restored, the dispatcher starts, and it
    # re-registers itself
    #
    # In traditional container-less deployments, instances don't get
    # deprovisioned when they miss their heartbeat, so this code is mostly a
    # no-op.
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    Metrics().clear_values()

    # Update Tower's rsyslog.conf file based on logging settings in the db
    reconfigure_rsyslog()
def inform_cluster_of_shutdown():
    """
    Graceful-shutdown hook: zero out this instance's capacity so the cluster
    stops scheduling work here, and reap its in-flight jobs. Best-effort:
    all failures are logged, never raised.
    """
    try:
        this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        this_inst.capacity = 0  # No thank you to new jobs while shut down
        this_inst.save(update_fields=['capacity', 'modified'])
        try:
            reaper.reap(this_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
        logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    """
    Recompute instance-group membership from each group's policy settings
    (policy_instance_list, then policy_instance_minimum, then
    policy_instance_percentage) and apply only the differences, under a
    cluster-wide advisory lock.
    """
    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
        lock_time = time.time() - started_waiting
        if lock_time > 1.0:
            to_log = logger.info
        else:
            to_log = logger.debug
        to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
        started_compute = time.time()
        all_instances = list(Instance.objects.order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))

        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
        Node = namedtuple('Instance', ['obj', 'groups'])

        # Process policy instance list first, these will represent manually managed memberships
        instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
        for ig in all_groups:
            group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()])  # obtained in prefetch
            for hostname in ig.policy_instance_list:
                if hostname not in instance_hostnames_map:
                    logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                    continue
                inst = instance_hostnames_map[hostname]
                group_actual.instances.append(inst.id)
                # NOTE: arguable behavior: policy-list-group is not added to
                # instance's group count for consideration in minimum-policy rules
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
            actual_groups.append(group_actual)

        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if len(g.instances) >= g.obj.policy_instance_minimum:
                    break
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via the policy list
                    continue
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_min_added.append(i.obj.id)
            if policy_min_added:
                logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))

        # Finally, process instance policy percentages
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_per_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via a minimum policy or policy list
                    continue
                if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
                    break
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_per_added.append(i.obj.id)
            if policy_per_added:
                logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))

        # Determine if any changes need to be made
        needs_change = False
        for g in actual_groups:
            if set(g.instances) != set(g.prior_instances):
                needs_change = True
                break
        if not needs_change:
            logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
            return

        # On a differential basis, apply instances to groups
        with transaction.atomic():
            for g in actual_groups:
                if g.obj.is_container_group:
                    logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
                    continue
                instances_to_add = set(g.instances) - set(g.prior_instances)
                instances_to_remove = set(g.prior_instances) - set(g.instances)
                if instances_to_add:
                    logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
                    g.obj.instances.add(*instances_to_add)
                if instances_to_remove:
                    logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
                    g.obj.instances.remove(*instances_to_remove)
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    """
    Broadcast task: flush cached values for the changed settings (plus any
    settings that declare a dependency on them), and reconfigure rsyslog if
    any external-logging setting changed.
    """
    orig_len = len(setting_keys)
    # NOTE: only first-level dependents are expanded — keys appended inside
    # this loop are not themselves expanded, because the index range is
    # fixed at the original length.
    for i in range(orig_len):
        for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
            setting_keys.append(dependent_key)
    cache_keys = set(setting_keys)
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)

    # Generator expression instead of a list inside any(): avoids building
    # an intermediate list and short-circuits on the first hit.
    if any(setting.startswith('LOG_AGGREGATOR') for setting in setting_keys):
        reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    """
    Broadcast task: best-effort removal of a project checkout directory and
    its companion lock file on every node. Failures are logged, not raised.
    """
    # TODO: possibly implement some retry logic
    lock_file = project_path + '.lock'
    removals = (
        (project_path, shutil.rmtree,
         'Success removing project files {}',
         'Could not remove project directory {}'),
        (lock_file, os.remove,
         'Success removing {}',
         'Could not remove lock file {}'),
    )
    for path, remove, ok_msg, fail_msg in removals:
        if not os.path.exists(path):
            continue
        try:
            remove(path)
            logger.debug(ok_msg.format(path))
        except Exception:
            logger.exception(fail_msg.format(path))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    """
    Broadcast task: toggle SQL query profiling cluster-wide. A positive
    threshold (seconds) enables profiling for `minutes`; a threshold of
    zero or less disables it.
    """
    if threshold > 0:
        cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
    else:
        cache.delete('awx-profile-sql-threshold')
        logger.error('SQL PROFILING DISABLED')
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    """
    Send each notification in notification_list (a list of Notification pks),
    recording per-notification success/failure; if job_id is given, also
    associate the notifications with that unified job.
    """
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
    if job_id is not None:
        job_actual = UnifiedJob.objects.get(id=job_id)

    notifications = Notification.objects.filter(id__in=notification_list)
    if job_id is not None:
        job_actual.notifications.add(*notifications)

    for notification in notifications:
        update_fields = ['status', 'notifications_sent']
        try:
            sent = notification.notification_template.send(notification.subject, notification.body)
            notification.status = "successful"
            notification.notifications_sent = sent
            if job_id is not None:
                job_actual.log_lifecycle("notifications_sent")
        except Exception as e:
            logger.exception("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
        finally:
            # Persist the outcome even when sending raised
            try:
                notification.save(update_fields=update_fields)
            except Exception:
                logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
    """Run analytics collection if the configured gather interval has elapsed."""
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    record = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    previous_run = None
    if record and record.value:
        previous_run = DateTimeField().to_internal_value(record.value)
    current_time = now()
    # Gather when we have never gathered, or the interval has passed.
    if previous_run is None or (current_time - previous_run).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL:
        analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
    """Delete job stdout files in JOBOUTPUT_ROOT older than
    LOCAL_STDOUT_EXPIRE_TIME seconds."""
    nowtime = time.time()
    cutoff = nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
        # Build the path once instead of three times per file.
        path = os.path.join(settings.JOBOUTPUT_ROOT, f)
        try:
            expired = os.path.getctime(path) < cutoff
        except FileNotFoundError:
            # File was removed between listdir() and getctime(); skip it.
            continue
        if expired:
            os.unlink(path)
            logger.debug("Removing {}".format(path))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
    """Force-remove dangling podman images on this node (no-op on k8s)."""
    if settings.IS_K8S:
        return
    list_cmd = 'podman images --filter="dangling=true" --format json'.split(" ")
    listing = subprocess.run(list_cmd, capture_output=True)
    if listing.returncode != 0:
        logger.debug("Cleanup execution environment images: could not get list of images")
        return
    if len(listing.stdout) == 0:
        return
    for entry in json.loads(listing.stdout):
        image_name = entry["Id"]
        logger.debug(f"Cleanup execution environment images: deleting {image_name}")
        removal = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
        if removal.returncode != 0:
            logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    """Periodic cluster health check run on each node.

    Refreshes this node's capacity, detects peers that stopped checking in
    ("lost" instances), reaps their jobs, and either zeroes their capacity
    or deletes them depending on AWX_AUTO_DEPROVISION_INSTANCES. Shuts this
    node down if a peer reports a newer application version.

    :raises RuntimeError: if this node's Instance record is missing, or when
        deliberately shutting down due to a version mismatch
    """
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []
    (changed, instance) = Instance.objects.get_or_register()
    if changed:
        logger.info("Registered tower node '{}'".format(instance.hostname))
    # Partition the instances: pull out our own record and any lost peers,
    # leaving only healthy peers in instance_list. Iterate over a copy
    # because we remove from the list as we go.
    for inst in list(instance_list):
        if inst.hostname == settings.CLUSTER_HOST_ID:
            this_inst = inst
            instance_list.remove(inst)
        elif inst.is_lost(ref_time=nowtime):
            lost_instances.append(inst)
            instance_list.remove(inst)
    if this_inst:
        # If our own record looked "lost" before this heartbeat, we are
        # (re)joining the cluster; refresh capacity and stop here.
        startup_event = this_inst.is_lost(ref_time=nowtime)
        this_inst.refresh_capacity()
        if startup_event:
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            return
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # IFF any node has a greater version than we do, then we'll shutdown services
    for other_inst in instance_list:
        if other_inst.version == "":
            continue
        if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
            logger.error(
                "Host {} reports version {}, but this node {} is at {}, shutting down".format(
                    other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
                )
            )
            # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
            # The heartbeat task will reset the capacity to the system capacity after upgrade.
            stop_local_services(communicate=False)
            raise RuntimeError("Shutting down.")
    for other_inst in lost_instances:
        try:
            # Fail any jobs the lost node still claims to be running.
            reaper.reap(other_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
        try:
            # Capacity could already be 0 because:
            # * It's a new node and it never had a heartbeat
            # * It was set to 0 by another tower node running this method
            # * It was set to 0 by this node, but auto deprovisioning is off
            #
            # If auto deprovisining is on, don't bother setting the capacity to 0
            # since we will delete the node anyway.
            if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                other_inst.capacity = 0
                other_inst.save(update_fields=['capacity'])
                logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
            elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
                deprovision_hostname = other_inst.hostname
                other_inst.delete()
                logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
        except DatabaseError as e:
            # "did not affect any rows" means another node raced us and
            # already handled this instance; that is expected, log at debug.
            if 'did not affect any rows' in str(e):
                logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
            else:
                logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
    """Delete kubernetes pods belonging to jobs that are no longer active."""
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        inactive_jobs = UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES)
        for job in inactive_jobs:
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pod_manager = PodManager(job)
                pod_manager.kube_api.delete_namespaced_pod(
                    name=pods[job.id],
                    namespace=pod_manager.namespace,
                    _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT,
                )
            except Exception:
                logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    """Launch jobs for all enabled Schedules that came due since the last run.

    Guarded by an advisory lock so only one node runs the scheduler at a
    time. Jobs spawned while the license is invalid are immediately failed.
    """
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")

        run_now = now()
        state = TowerScheduleState.get_solo()
        last_run = state.schedule_last_run
        logger.debug("Last scheduler run was: %s", last_run)
        # Record this run's timestamp up front so a crash mid-run does not
        # replay the same window on the next invocation.
        state.schedule_last_run = run_now
        state.save()

        # Schedules whose window already passed only get their computed
        # fields (e.g. next_run) refreshed; nothing is launched for them.
        old_schedules = Schedule.objects.enabled().before(last_run)
        for schedule in old_schedules:
            schedule.update_computed_fields()
        schedules = Schedule.objects.enabled().between(last_run, run_now)

        invalid_license = False
        try:
            access_registry[Job](None).check_license(quiet=True)
        except PermissionDenied as e:
            invalid_license = e

        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.update_computed_fields()  # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
                logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))

                if invalid_license:
                    # Job is created but failed immediately so the schedule's
                    # bookkeeping stays consistent while the license is bad.
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = gettext_noop(
                    "Scheduled job could not start because it \
                    was not in the right state or required manual credentials"
                )
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    """On job success, poke the task manager so dependent work can start."""
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
        return
    if instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    """Propagate a task failure to its dependent subtasks.

    Marks each listed subtask (other than the failing task itself, canceled
    tasks, and already-successful tasks) as failed, recording which job
    started the cascade, then kicks the task manager.

    :param task_id: celery task id of the job that errored
    :param kwargs: may contain 'subtasks', a list of {'type': ..., 'id': ...}
    """
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    logger.warn("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue
            # The first resolvable subtask is treated as the failure source
            # and is referenced in every dependent job's explanation.
            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']
            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")
    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
        pass
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    """Send 'succeeded'/'failed' notification templates once the job has
    actually reached a finished state, polling up to 5 times."""
    uj = UnifiedJob.objects.get(pk=job_id)
    for _ in range(5):
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        # wait a few seconds to avoid a race where the
        # events are persisted _before_ the UJ.status
        # changes from running -> successful
        time.sleep(1)
        uj = UnifiedJob.objects.get(pk=job_id)
    logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
    """
    qs = Inventory.objects.filter(id=inventory_id)
    if not qs.exists():
        logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
        return
    inventory = qs[0]
    try:
        inventory.update_computed_fields()
    except DatabaseError as e:
        # A concurrent task already performed this update; exit quietly.
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
            return
        raise
def update_smart_memberships_for_inventory(smart_inventory):
    """Reconcile cached SmartInventoryMembership rows with the hosts that
    currently match the smart inventory's filter.

    :returns: True when rows were added or removed, otherwise False
    """
    cached_ids = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
    matching_ids = set(smart_inventory.hosts.values_list('id', flat=True))
    to_add = matching_ids - cached_ids
    to_remove = cached_ids - matching_ids
    if not (to_add or to_remove):
        return False
    with transaction.atomic():
        if to_remove:
            SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=to_remove).delete()
        if to_add:
            memberships = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in to_add]
            SmartInventoryMembership.objects.bulk_create(memberships, ignore_conflicts=True)
    logger.debug(
        'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
            smart_inventory.pk, len(to_add), len(to_remove), len(matching_ids)
        )
    )
    return True  # changed
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    """Refresh cached host memberships for every active smart inventory,
    then recompute fields for the inventories that changed."""
    candidates = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    touched = set()
    for smart_inventory in candidates:
        try:
            if update_smart_memberships_for_inventory(smart_inventory):
                touched.add(smart_inventory)
        except IntegrityError:
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in touched:
        smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
    #
    # NOTE: this function is not actually in use anymore,
    # but has been intentionally kept for historical purposes,
    # and to serve as an illustration if we ever need to perform
    # bulk modification/migration of event data in the future.
    #
    # Copies rows from _old_<tblname> into <tblname> in chunks (newest
    # first), deleting each migrated chunk, and drops the old table when
    # it is empty. Guarded by an advisory lock so only one worker runs
    # the migration for a given table.
    if 'event' not in tblname:
        return
    with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
        if acquired is False:
            return
        chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE

        def _remaining():
            # Highest remaining id in the old table, or None once the old
            # table has been dropped. `cursor` is the enclosing function's
            # local, bound before this closure is first called.
            try:
                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
                return cursor.fetchone()[0]
            except ProgrammingError:
                # the table is gone (migration is unnecessary)
                return None

        with connection.cursor() as cursor:
            total_rows = _remaining()
            while total_rows:
                with transaction.atomic():
                    cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
                    last_insert_pk = cursor.fetchone()
                    if last_insert_pk is None:
                        # this means that the SELECT from the old table was
                        # empty, and there was nothing to insert (so we're done)
                        break
                    last_insert_pk = last_insert_pk[0]
                    cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
                logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
            if _remaining() is None:
                cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
                logger.warn(f'{tblname} primary key migration to bigint has finished')
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    # Delete inventory as user
    # Runs with inventory-computed-fields and group-removal signal handling
    # suppressed, impersonating the requesting user (or anonymously when the
    # user can no longer be resolved). Retries on DatabaseError up to
    # `retries` times, 10 seconds apart.
    if user_id is None:
        user = None
    else:
        try:
            user = User.objects.get(id=user_id)
        except Exception:
            # The requesting user may have been deleted meanwhile;
            # proceed without impersonation.
            user = None
    with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
        try:
            i = Inventory.objects.get(id=inventory_id)
            for host in i.hosts.iterator():
                # Detach job events so host rows can be deleted cleanly.
                host.job_events_as_primary_host.update(host=None)
            i.delete()
            emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
            logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
        except Inventory.DoesNotExist:
            logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
            return
        except DatabaseError:
            logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
            if retries > 0:
                time.sleep(10)
                delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
    """Decorator for task methods: after the wrapped method returns (or
    raises), delete every path accumulated in self.cleanup_paths, then
    reset the list."""

    def _discard(path):
        # Directories are removed recursively; files individually.
        # Removal failures are logged, never raised.
        try:
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            elif os.path.exists(path):
                os.remove(path)
        except OSError:
            logger.exception("Failed to remove tmp file: {}".format(path))

    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        finally:
            for p in self.cleanup_paths:
                _discard(p)
            self.cleanup_paths = []

    return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
    def __init__(self):
        # Scratch paths deleted after the run (see the with_path_cleanup
        # decorator, which drains and resets this list).
        self.cleanup_paths = []
        # PK of the parent workflow job, if this job was spawned by one.
        self.parent_workflow_job_id = None
        # host name -> host id mapping used to attribute events to Hosts.
        self.host_map = {}
        # Correlation id propagated into every emitted event.
        self.guid = GuidMiddleware.get_guid()
    def update_model(self, pk, _attempt=0, **updates):
        """Reload the model instance from the database and update the
        given fields.

        Retries on DatabaseError up to 5 times, 5 seconds apart, to ride
        out transient DB contention. When 'status' is updated, the derived
        'failed' field is saved along with it.

        :param pk: primary key of the instance to refresh/update
        :param _attempt: internal retry counter; not intended for callers
        :param updates: field=value pairs to set and save
        :returns: the refreshed instance, or None after exhausting retries
        """
        try:
            with transaction.atomic():
                # Retrieve the model instance.
                instance = self.model.objects.get(pk=pk)
                # Update the appropriate fields and save the model
                # instance, then return the new instance.
                if updates:
                    update_fields = ['modified']
                    for field, value in updates.items():
                        setattr(instance, field, value)
                        update_fields.append(field)
                        if field == 'status':
                            update_fields.append('failed')
                    instance.save(update_fields=update_fields)
                return instance
        except DatabaseError as e:
            # Log out the error to the debug logger.
            logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
            # Attempt to retry the update, assuming we haven't already
            # tried too many times.
            if _attempt < 5:
                time.sleep(5)
                return self.update_model(pk, _attempt=_attempt + 1, **updates)
            else:
                logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
    def build_execution_environment_params(self, instance, private_data_dir):
        """Build ansible-runner keyword arguments for containerized execution.

        Returns {} on Kubernetes (pod spec handles isolation there).
        Otherwise configures the container image, a podman auth file when
        the EE credential supplies host/username/password, the pull policy,
        and volume mounts for AWX_ISOLATION_SHOW_PATHS.

        :raises RuntimeError: when the EE credential is missing any of the
            host/username/password inputs
        """
        if settings.IS_K8S:
            return {}
        image = instance.execution_environment.image
        params = {
            "container_image": image,
            "process_isolation": True,
            "container_options": ['--user=root'],
        }
        if instance.execution_environment.credential:
            cred = instance.execution_environment.credential
            if cred.has_inputs(field_names=('host', 'username', 'password')):
                # Write the registry auth file next to (outside of) the
                # private data dir, readable only by the owner.
                path = os.path.split(private_data_dir)[0]
                with open(path + '/auth.json', 'w') as authfile:
                    os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
                    host = cred.get_input('host')
                    username = cred.get_input('username')
                    password = cred.get_input('password')
                    token = "{}:{}".format(username, password)
                    auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
                    authfile.write(json.dumps(auth_data, indent=4))
                params["container_options"].append(f'--authfile={authfile.name}')
            else:
                raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
        pull = instance.execution_environment.pull
        if pull:
            params['container_options'].append(f'--pull={pull}')
        if settings.AWX_ISOLATION_SHOW_PATHS:
            # Expose the allow-listed host paths inside the container
            # (":Z" requests an SELinux relabel for the mount).
            params['container_volume_mounts'] = []
            for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
                params['container_volume_mounts'].append(f'{this_path}:{this_path}:Z')
        return params
    def build_private_data(self, instance, private_data_dir):
        """
        Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict of the form:

            {
                'credentials': {<Credential>: <decrypted key data>, ...},
                'certificates': {<Credential>: <signed certificate data>, ...},
            }

        The base implementation returns None (no private data); subclasses
        override this. Consumed by build_private_data_files().
        """
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# Ansible runner requires that project exists,
# and we will write files in the other folders without pre-creating the folder
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
    def build_private_data_files(self, instance, private_data_dir):
        """
        Creates temporary files containing the private data.
        Returns a dictionary i.e.,

        {
            'credentials': {
                <awx.main.models.Credential>: '/path/to/decrypted/data',
                <awx.main.models.Credential>: '/path/to/decrypted/data',
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
                <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
                ...
            }
        }

        Input comes from build_private_data(); returns only a 'credentials'
        mapping (ssh/scm keys are keyed as 'ssh', others by Credential).
        """
        private_data = self.build_private_data(instance, private_data_dir)
        private_data_files = {'credentials': {}}
        if private_data is not None:
            for credential, data in private_data.get('credentials', {}).items():
                # OpenSSH formatted keys must have a trailing newline to be
                # accepted by ssh-add.
                if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
                    data += '\n'
                # For credentials used with ssh-add, write to a named pipe which
                # will be read then closed, instead of leaving the SSH key on disk.
                if credential and credential.credential_type.namespace in ('ssh', 'scm'):
                    try:
                        os.mkdir(os.path.join(private_data_dir, 'env'))
                    except OSError as e:
                        # 'env' may already exist from another build step.
                        if e.errno != errno.EEXIST:
                            raise
                    path = os.path.join(private_data_dir, 'env', 'ssh_key')
                    ansible_runner.utils.open_fifo_write(path, data.encode())
                    private_data_files['credentials']['ssh'] = path
                # Ansible network modules do not yet support ssh-agent.
                # Instead, ssh private key file is explicitly passed via an
                # env variable.
                else:
                    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
                    f = os.fdopen(handle, 'w')
                    f.write(data)
                    f.close()
                    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
                    private_data_files['credentials'][credential] = path
            for credential, data in private_data.get('certificates', {}).items():
                artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
                if not os.path.exists(artifact_dir):
                    os.makedirs(artifact_dir, mode=0o700)
                path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
                with open(path, 'w') as f:
                    f.write(data)
                    # NOTE(review): redundant close; the with-block closes f.
                    f.close()
                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
        return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
    def build_extra_vars_file(self, instance, private_data_dir):
        """
        Build ansible yaml file filled with extra vars to be passed via -e@file.yml

        Base implementation is a no-op (returns None). Subclasses that
        support extra vars override this; see _write_extra_vars_file().
        """
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
    def build_env(self, instance, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible-playbook.

        Pulls in all ANSIBLE_* Django settings and AWX_TASK_ENV entries,
        and writes a registries.conf marking the EE registry insecure when
        its credential disables SSL verification.

        :raises RuntimeError: if the instance has no execution environment
        """
        env = {}
        # Add ANSIBLE_* settings to the subprocess environment.
        for attr in dir(settings):
            if attr == attr.upper() and attr.startswith('ANSIBLE_'):
                env[attr] = str(getattr(settings, attr))
        # Also set environment variables configured in AWX_TASK_ENV setting.
        for key, value in settings.AWX_TASK_ENV.items():
            env[key] = str(value)
        env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
        if self.instance.execution_environment is None:
            raise RuntimeError('The project could not sync because there is no Execution Environment.')
        ee_cred = self.instance.execution_environment.credential
        if ee_cred:
            verify_ssl = ee_cred.get_input('verify_ssl')
            if not verify_ssl:
                # Registry verification disabled: write an owner-only
                # registries.conf in the wrapper dir marking it insecure.
                pdd_wrapper_path = os.path.split(private_data_dir)[0]
                registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
                host = ee_cred.get_input('host')
                with open(registries_conf_path, 'w') as registries_conf:
                    os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
                    lines = [
                        '[[registry]]',
                        'location = "{}"'.format(host),
                        'insecure = true',
                    ]
                    registries_conf.write('\n'.join(lines))
                # Podman >= 3.1.0
                env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
                # Podman < 3.1.0
                env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
        return env
    def build_inventory(self, instance, private_data_dir):
        """Write the job's inventory as an executable script under
        <private_data_dir>/inventory/hosts and return its path.

        Also populates self.host_map (host name -> remote id) so emitted
        events can be associated back to Host records.
        """
        script_params = dict(hostvars=True, towervars=True)
        if hasattr(instance, 'job_slice_number'):
            # Sliced jobs only see their share of the inventory.
            script_params['slice_number'] = instance.job_slice_number
            script_params['slice_count'] = instance.job_slice_count
        script_data = instance.inventory.get_script_data(**script_params)
        # maintain a list of host_name --> host_id
        # so we can associate emitted events to Host objects
        self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
        json_data = json.dumps(script_data)
        path = os.path.join(private_data_dir, 'inventory')
        fn = os.path.join(path, 'hosts')
        with open(fn, 'w') as f:
            os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
            f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
        return fn
    def build_args(self, instance, private_data_dir, passwords):
        """Build the command-line argument list for the run; must be
        implemented by subclasses.

        :raises NotImplementedError: always, in the base class
        """
        raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
    def build_cwd(self, instance, private_data_dir):
        """Return the working directory for the run; must be implemented
        by subclasses.

        :raises NotImplementedError: always, in the base class
        """
        raise NotImplementedError
    def build_credentials_list(self, instance):
        # Base implementation: no credentials. Subclasses override to
        # return the credentials relevant to their job type.
        return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
    def get_password_prompts(self, passwords={}):
        """
        Return a dictionary where keys are strings or regular expressions for
        prompts, and values are password lookup keys (keys that are returned
        from build_passwords).

        Base implementation returns an empty mapping; subclasses add their
        own prompt patterns. NOTE(review): the mutable default argument is
        never mutated here, but overrides should take care not to mutate it.
        """
        return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
    def pre_run_hook(self, instance, private_data_dir):
        """
        Hook for any steps to run before the job/task starts

        :param instance: the unified job model instance about to run
        :param private_data_dir: per-run scratch directory
        """
        instance.log_lifecycle("pre_run")
    def post_run_hook(self, instance, status):
        """
        Hook for any steps to run before job/task is marked as complete.

        :param instance: the unified job model instance that just ran
        :param status: final runner status string for the run
        """
        instance.log_lifecycle("post_run")
    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
        """
        Hook for any steps to run after job/task is marked as complete.

        Copies playbook profiling artifacts out of the private data dir and
        records installed collections / ansible version on the instance
        when the corresponding artifact files exist.
        """
        instance.log_lifecycle("finalize_run")
        job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
        awx_profiling_dir = '/var/log/tower/playbook_profiling/'
        collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
        ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
        if not os.path.exists(awx_profiling_dir):
            os.mkdir(awx_profiling_dir)
        if os.path.isdir(job_profiling_dir):
            # Keep profiling output beyond the private data dir's lifetime.
            shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
        if os.path.exists(collections_info):
            with open(collections_info) as ee_json_info:
                ee_collections_info = json.loads(ee_json_info.read())
                instance.installed_collections = ee_collections_info
                instance.save(update_fields=['installed_collections'])
        if os.path.exists(ansible_version_file):
            with open(ansible_version_file) as ee_ansible_info:
                ansible_version_info = ee_ansible_info.readline()
                instance.ansible_version = ansible_version_info
                instance.save(update_fields=['ansible_version'])
    def event_handler(self, event_data):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This method is called once for *every event* emitted by Ansible
        # Runner as a playbook runs. That means that changes to the code in
        # this method are _very_ likely to introduce performance regressions.
        #
        # Even if this function is made on average .05s slower, it can have
        # devastating performance implications for playbooks that emit
        # tens or hundreds of thousands of events.
        #
        # Proceed with caution!
        #
        """
        Ansible runner puts a parent_uuid on each event, no matter what the type.
        AWX only saves the parent_uuid if the event is for a Job.

        Enriches the event (workflow job id, host name/id, guid), dispatches
        it, and returns False so ansible-runner does not also persist it.
        """
        # cache end_line locally for RunInventoryUpdate tasks
        # which generate job events from two 'streams':
        # ansible-inventory and the awx.main.commands.inventory_import
        # logger
        if isinstance(self, RunInventoryUpdate):
            self.end_line = event_data['end_line']
        if event_data.get(self.event_data_key, None):
            if self.event_data_key != 'job_id':
                event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
                event_data['host_name'] = host
                if host in self.host_map:
                    event_data['host_id'] = self.host_map[host]
            else:
                event_data['host_name'] = ''
                event_data['host_id'] = ''
            if event_data.get('event') == 'playbook_on_stats':
                event_data['host_map'] = self.host_map
        if isinstance(self, RunProjectUpdate):
            # it's common for Ansible's SCM modules to print
            # error messages on failure that contain the plaintext
            # basic auth credentials (username + password)
            # it's also common for the nested event data itself (['res']['...'])
            # to contain unredacted text on failure
            # this is a _little_ expensive to filter
            # with regex, but project updates don't have many events,
            # so it *should* have a negligible performance impact
            task = event_data.get('event_data', {}).get('task_action')
            try:
                if task in ('git', 'svn'):
                    event_data_json = json.dumps(event_data)
                    event_data_json = UriCleaner.remove_sensitive(event_data_json)
                    event_data = json.loads(event_data_json)
            except json.JSONDecodeError:
                pass
        if 'event_data' in event_data:
            event_data['event_data']['guid'] = self.guid
        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
        self.event_ct += 1
        '''
        Handle artifacts
        '''
        if event_data.get('event_data', {}).get('artifact_data', {}):
            self.instance.artifacts = event_data['event_data']['artifact_data']
            self.instance.save(update_fields=['artifacts'])
        return False
    def cancel_callback(self):
        """
        Ansible runner callback to tell the job when/if it is canceled

        Re-reads the job from the database; returns True (cancel the run)
        when the record has been deleted, flagged for cancel, or already
        marked canceled.
        """
        unified_job_id = self.instance.pk
        self.instance = self.update_model(unified_job_id)
        if not self.instance:
            logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
            return True
        if self.instance.cancel_flag or self.instance.status == 'canceled':
            # Surface slow cancellations for diagnosis.
            cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
            if cancel_wait > 5:
                logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
            return True
        return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
    def status_handler(self, status_data, runner_config):
        """
        Ansible runner callback triggered on status transition

        On 'starting', records the sanitized command/cwd/env on the job;
        on 'error', records the runner's traceback.
        """
        if status_data['status'] == 'starting':
            job_env = dict(runner_config.env)
            '''
            Take the safe environment variables and overwrite
            '''
            # Redacted values from self.safe_env replace secrets before the
            # environment is persisted on the job record.
            for k, v in self.safe_env.items():
                if k in job_env:
                    job_env[k] = v
            self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
        elif status_data['status'] == 'error':
            result_traceback = status_data.get('result_traceback', None)
            if result_traceback:
                self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
    @with_path_cleanup
    def run(self, pk, **kwargs):
        """
        Run the job/task and capture its output.

        Prepares the private data dir, credentials, args and environment,
        hands the job to ansible-runner (directly for SystemJobs, via
        Receptor otherwise), then persists the final status and fires the
        post/final run hooks.  Raises AwxTaskError on non-successful status.
        """
        self.instance = self.model.objects.get(pk=pk)
        # Resolve an execution environment up front if the job does not have one.
        if self.instance.execution_environment_id is None:
            from awx.main.signals import disable_activity_stream
            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
        # self.instance because of the update_model pattern and when it's used in callback handlers
        self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
        self.instance.websocket_emit_status("running")
        # Assume the worst until the runner reports a real status/return code.
        status, rc = 'error', None
        extra_update_fields = {}
        fact_modification_times = {}
        self.event_ct = 0
        '''
        Needs to be an object property because status_handler uses it in a callback context
        '''
        self.safe_env = {}
        self.safe_cred_env = {}
        private_data_dir = None
        # store a reference to the parent workflow job (if any) so we can include
        # it in event data JSON
        if self.instance.spawned_by_workflow:
            self.parent_workflow_job_id = self.instance.get_workflow_job().id
        try:
            self.instance.send_notification_templates("running")
            private_data_dir = self.build_private_data_dir(self.instance)
            # pre_run_hook may run a project sync and can fail the job early.
            self.pre_run_hook(self.instance, private_data_dir)
            self.instance.log_lifecycle("preparing_playbook")
            if self.instance.cancel_flag:
                self.instance = self.update_model(self.instance.pk, status='canceled')
            if self.instance.status != 'running':
                # Stop the task chain and prevent starting the job if it has
                # already been canceled.
                self.instance = self.update_model(pk)
                status = self.instance.status
                raise RuntimeError('not starting %s task' % self.instance.status)
            if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
            # store a record of the venv used at runtime
            if hasattr(self.instance, 'custom_virtualenv'):
                self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
            # Fetch "cached" fact data from prior runs and put on the disk
            # where ansible expects to find it
            if getattr(self.instance, 'use_fact_cache', False):
                self.instance.start_job_fact_cache(
                    os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
                    fact_modification_times,
                )
            # May have to serialize the value
            private_data_files = self.build_private_data_files(self.instance, private_data_dir)
            passwords = self.build_passwords(self.instance, kwargs)
            self.build_extra_vars_file(self.instance, private_data_dir)
            args = self.build_args(self.instance, private_data_dir, passwords)
            env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
            # safe_env holds the sanitized environment persisted by status_handler.
            self.safe_env = build_safe_env(env)
            credentials = self.build_credentials_list(self.instance)
            for credential in credentials:
                if credential:
                    credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
            self.safe_env.update(self.safe_cred_env)
            self.write_args_file(private_data_dir, args)
            password_prompts = self.get_password_prompts(passwords)
            expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
            # Parameters handed to ansible-runner (or Receptor).
            params = {
                'ident': self.instance.id,
                'private_data_dir': private_data_dir,
                'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
                'inventory': self.build_inventory(self.instance, private_data_dir),
                'passwords': expect_passwords,
                'envvars': env,
                'settings': {
                    'job_timeout': self.get_instance_timeout(self.instance),
                    'suppress_ansible_output': True,
                },
            }
            if isinstance(self.instance, AdHocCommand):
                params['module'] = self.build_module_name(self.instance)
                params['module_args'] = self.build_module_args(self.instance)
            if getattr(self.instance, 'use_fact_cache', False):
                # Enable Ansible fact cache.
                params['fact_cache_type'] = 'jsonfile'
            else:
                # Disable Ansible fact cache.
                params['fact_cache_type'] = ''
            if self.instance.is_container_group_task or settings.IS_K8S:
                params['envvars'].pop('HOME', None)
            '''
            Delete parameters if the values are None or empty array
            '''
            for v in ['passwords', 'playbook', 'inventory']:
                if not params[v]:
                    del params[v]
            self.dispatcher = CallbackQueueDispatcher()
            self.instance.log_lifecycle("running_playbook")
            if isinstance(self.instance, SystemJob):
                # System jobs run ansible-runner in-process; everything else
                # is dispatched through Receptor.
                cwd = self.build_cwd(self.instance, private_data_dir)
                res = ansible_runner.interface.run(
                    project_dir=cwd, event_handler=self.event_handler, finished_callback=self.finished_callback, status_handler=self.status_handler, **params
                )
            else:
                receptor_job = AWXReceptorJob(self, params)
                self.unit_id = receptor_job.unit_id
                res = receptor_job.run()
                if not res:
                    return
            status = res.status
            rc = res.rc
            if status == 'timeout':
                self.instance.job_explanation = "Job terminated due to timeout"
                status = 'failed'
                extra_update_fields['job_explanation'] = self.instance.job_explanation
                # ensure failure notification sends even if playbook_on_stats event is not triggered
                # NOTE(review): this reads self.instance.job.id rather than
                # self.instance.id — confirm which unified job id is intended.
                handle_success_and_failure_notifications.apply_async([self.instance.job.id])
        except InvalidVirtualenvError as e:
            extra_update_fields['job_explanation'] = e.message
            logger.error('{} {}'.format(self.instance.log_format, e.message))
        except Exception:
            # this could catch programming or file system errors
            extra_update_fields['result_traceback'] = traceback.format_exc()
            logger.exception('%s Exception occurred while running task', self.instance.log_format)
        finally:
            logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
        try:
            self.post_run_hook(self.instance, status)
        except PostRunError as exc:
            # A post-run failure only downgrades an otherwise-successful job.
            if status == 'successful':
                status = exc.status
                extra_update_fields['job_explanation'] = exc.args[0]
                if exc.tb:
                    extra_update_fields['result_traceback'] = exc.tb
        except Exception:
            logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
        self.instance = self.update_model(pk)
        self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
        try:
            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
        except Exception:
            logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
        self.instance.websocket_emit_status(status)
        if status != 'successful':
            if status == 'canceled':
                raise AwxTaskError.TaskCancel(self.instance, rc)
            else:
                raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
    """
    Run a job using ansible-playbook.
    """

    model = Job
    event_model = JobEvent
    event_data_key = 'job_id'

    def build_private_data(self, job, private_data_dir):
        """
        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        """
        private_data = {'credentials': {}}
        for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
            # If we were sent SSH credentials, decrypt them and send them
            # back (they will be written to a temporary file).
            if credential.has_input('ssh_key_data'):
                private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
            if credential.has_input('ssh_public_key_data'):
                private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
        return private_data

    def build_passwords(self, job, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user, sudo/su
        and ansible-vault.
        """
        passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
        cred = job.machine_credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value

        for cred in job.vault_credentials:
            field = 'vault_password'
            vault_id = cred.get_input('vault_id', default=None)
            if vault_id:
                field = 'vault_password.{}'.format(vault_id)
                if field in passwords:
                    raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
            value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
            if value not in ('', 'ASK'):
                passwords[field] = value

        '''
        Only 1 value can be provided for a unique prompt string. Prefer ssh
        key unlock over network key unlock.
        '''
        if 'ssh_key_unlock' not in passwords:
            for cred in job.network_credentials:
                if cred.inputs.get('ssh_key_unlock'):
                    passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
                    break

        return passwords

    def build_env(self, job, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible-playbook.
        """
        env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
        if private_data_files is None:
            private_data_files = {}
        # Set environment variables needed for inventory and job event
        # callbacks to work.
        env['JOB_ID'] = str(job.pk)
        env['INVENTORY_ID'] = str(job.inventory.pk)
        if job.project:
            env['PROJECT_REVISION'] = job.project.scm_revision
        env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
        env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
        if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
            env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
        env['AWX_HOST'] = settings.TOWER_URL_BASE

        # Create a directory for ControlPath sockets that is unique to each job
        cp_dir = os.path.join(private_data_dir, 'cp')
        if not os.path.exists(cp_dir):
            os.mkdir(cp_dir, 0o700)
        # FIXME: more elegant way to manage this path in container
        env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'

        # Set environment variables for cloud credentials.
        cred_files = private_data_files.get('credentials', {})
        for cloud_cred in job.cloud_credentials:
            if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
                env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)

        for network_cred in job.network_credentials:
            env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
            env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
            ssh_keyfile = cred_files.get(network_cred, '')
            if ssh_keyfile:
                env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
            authorize = network_cred.get_input('authorize', default=False)
            env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
            if authorize:
                env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')

        path_vars = (
            ('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
            ('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
        )

        config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))

        for env_key, config_setting, folder, default in path_vars:
            paths = default.split(':')
            if env_key in env:
                # Prepend the runtime-provided value once if it contributes any
                # path not already in the defaults.  (Previously the value was
                # prepended once per missing path, duplicating entries.)
                if any(path not in paths for path in env[env_key].split(':')):
                    paths = [env[env_key]] + paths
            elif config_setting in config_values:
                if any(path not in paths for path in config_values[config_setting].split(':')):
                    paths = [config_values[config_setting]] + paths
            # The per-job requirements folder always takes highest precedence.
            paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
            env[env_key] = os.pathsep.join(paths)

        return env

    def build_args(self, job, private_data_dir, passwords):
        """
        Build command line argument list for running ansible-playbook,
        optionally using ssh-agent for public/private key authentication.
        """
        creds = job.machine_credential

        ssh_username, become_username, become_method = '', '', ''
        if creds:
            ssh_username = creds.get_input('username', default='')
            become_method = creds.get_input('become_method', default='')
            become_username = creds.get_input('become_username', default='')
        else:
            become_method = None
            become_username = ""
        # Always specify the normal SSH user as root by default.  Since this
        # task is normally running in the background under a service account,
        # it doesn't make sense to rely on ansible-playbook's default of using
        # the current user.
        ssh_username = ssh_username or 'root'
        args = []
        if job.job_type == 'check':
            args.append('--check')
        args.extend(['-u', sanitize_jinja(ssh_username)])
        if 'ssh_password' in passwords:
            args.append('--ask-pass')
        if job.become_enabled:
            args.append('--become')
        if job.diff_mode:
            args.append('--diff')
        if become_method:
            args.extend(['--become-method', sanitize_jinja(become_method)])
        if become_username:
            args.extend(['--become-user', sanitize_jinja(become_username)])
        if 'become_password' in passwords:
            args.append('--ask-become-pass')

        # Support prompting for multiple vault passwords
        for k, v in passwords.items():
            if k.startswith('vault_password'):
                if k == 'vault_password':
                    args.append('--ask-vault-pass')
                else:
                    # split only on the first dot in case the vault ID itself contains a dot
                    vault_id = k.split('.', 1)[1]
                    args.append('--vault-id')
                    args.append('{}@prompt'.format(vault_id))

        if job.forks:
            if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
                # Clamp the requested fork count to the configured maximum.
                logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
                args.append('--forks=%d' % settings.MAX_FORKS)
            else:
                args.append('--forks=%d' % job.forks)
        if job.force_handlers:
            args.append('--force-handlers')
        if job.limit:
            args.extend(['-l', job.limit])
        if job.verbosity:
            args.append('-%s' % ('v' * min(5, job.verbosity)))
        if job.job_tags:
            args.extend(['-t', job.job_tags])
        if job.skip_tags:
            args.append('--skip-tags=%s' % job.skip_tags)
        if job.start_at_task:
            args.append('--start-at-task=%s' % job.start_at_task)
        return args

    def build_cwd(self, job, private_data_dir):
        """Jobs run from the project checkout inside the private data dir."""
        return os.path.join(private_data_dir, 'project')

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        """The playbook path is stored relative to the project checkout."""
        return job.playbook

    def build_extra_vars_file(self, job, private_data_dir):
        """Write AWX meta vars merged with the job's extra vars to disk."""
        # Define special extra_vars for AWX, combine with job.extra_vars.
        extra_vars = job.awx_meta_vars()

        if job.extra_vars_dict:
            extra_vars.update(json.loads(job.decrypted_extra_vars()))

        # By default, all extra vars disallow Jinja2 template usage for
        # security reasons; top level key-values defined in JT.extra_vars, however,
        # are allowed as "safe" (because they can only be set by users with
        # higher levels of privilege - those that have the ability create and
        # edit Job Templates)
        safe_dict = {}
        if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
            safe_dict = job.job_template.extra_vars_dict

        return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)

    def build_credentials_list(self, job):
        """Return every credential attached to the job (input sources prefetched)."""
        return job.credentials.prefetch_related('input_sources__source_credential').all()

    def get_password_prompts(self, passwords=None):
        """
        Map expected interactive prompt patterns (regular expressions) to the
        password lookup keys used to answer them.
        """
        # Avoid a shared mutable {} default argument; normalize here instead.
        if passwords is None:
            passwords = {}
        d = super(RunJob, self).get_password_prompts(passwords)
        d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
        d[r'Bad passphrase, try again for .*:\s*?$'] = ''
        for method in PRIVILEGE_ESCALATION_METHODS:
            d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
            d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
        d[r'BECOME password.*:\s*?$'] = 'become_password'
        d[r'SSH password:\s*?$'] = 'ssh_password'
        d[r'Password:\s*?$'] = 'ssh_password'
        d[r'Vault password:\s*?$'] = 'vault_password'
        for k, v in passwords.items():
            if k.startswith('vault_password.'):
                # split only on the first dot in case the vault ID itself contains a dot
                vault_id = k.split('.', 1)[1]
                d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
        return d

    def build_execution_environment_params(self, instance, private_data_dir):
        """Add the Insights system-id mount (when applicable) to the EE params."""
        if settings.IS_K8S:
            return {}

        params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)

        # If this has an insights agent and it is not already mounted then show it
        insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
        if instance.use_fact_cache and os.path.exists(insights_dir):
            logger.info('not parent of others')
            params.setdefault('container_volume_mounts', [])
            params['container_volume_mounts'].extend(
                [
                    f"{insights_dir}:{insights_dir}:Z",
                ]
            )

        return params

    def pre_run_hook(self, job, private_data_dir):
        """
        Validate job prerequisites and, when needed, run a local project sync
        (source update and/or role/collection install) before the job starts.
        """
        super(RunJob, self).pre_run_hook(job, private_data_dir)
        if job.inventory is None:
            error = _('Job could not start because it does not have a valid inventory.')
            self.update_model(job.pk, status='failed', job_explanation=error)
            raise RuntimeError(error)
        elif job.project is None:
            error = _('Job could not start because it does not have a valid project.')
            self.update_model(job.pk, status='failed', job_explanation=error)
            raise RuntimeError(error)
        elif job.execution_environment is None:
            error = _('Job could not start because no Execution Environment could be found.')
            self.update_model(job.pk, status='error', job_explanation=error)
            raise RuntimeError(error)
        elif job.project.status in ('error', 'failed'):
            msg = _('The project revision for this job template is unknown due to a failed update.')
            job = self.update_model(job.pk, status='failed', job_explanation=msg)
            raise RuntimeError(msg)

        project_path = job.project.get_project_path(check_if_exists=False)
        job_revision = job.project.scm_revision
        sync_needs = []
        source_update_tag = 'update_{}'.format(job.project.scm_type)
        branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
        if not job.project.scm_type:
            pass  # manual projects are not synced, user has responsibility for that
        elif not os.path.exists(project_path):
            logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
            sync_needs.append(source_update_tag)
        elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
            try:
                git_repo = git.Repo(project_path)
                if job_revision == git_repo.head.commit.hexsha:
                    logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
                else:
                    sync_needs.append(source_update_tag)
            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
                logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
                sync_needs.append(source_update_tag)
        else:
            logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
            sync_needs.append(source_update_tag)

        has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
        # Galaxy requirements are not supported for manual projects
        if job.project.scm_type and ((not has_cache) or branch_override):
            sync_needs.extend(['install_roles', 'install_collections'])

        if sync_needs:
            pu_ig = job.instance_group
            pu_en = job.execution_node

            sync_metafields = dict(
                launch_type="sync",
                job_type='run',
                job_tags=','.join(sync_needs),
                status='running',
                instance_group=pu_ig,
                execution_node=pu_en,
                celery_task_id=job.celery_task_id,
            )
            if branch_override:
                sync_metafields['scm_branch'] = job.scm_branch
                sync_metafields['scm_clean'] = True  # to accommodate force pushes
            if 'update_' not in sync_metafields['job_tags']:
                # Galaxy-only sync: keep the project pinned to its revision.
                sync_metafields['scm_revision'] = job_revision
            local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
            # save the associated job before calling run() so that a
            # cancel() call on the job can cancel the project update
            job = self.update_model(job.pk, project_update=local_project_sync)

            project_update_task = local_project_sync._get_task_class()
            try:
                # the job private_data_dir is passed so sync can download roles and collections there
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
            except Exception:
                local_project_sync.refresh_from_db()
                if local_project_sync.status != 'canceled':
                    job = self.update_model(
                        job.pk,
                        status='failed',
                        job_explanation=(
                            'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                            % ('project_update', local_project_sync.name, local_project_sync.id)
                        ),
                    )
                    raise
                job.refresh_from_db()
                if job.cancel_flag:
                    return
        else:
            # Case where a local sync is not needed, meaning that local tree is
            # up-to-date with project, job is running project current version
            if job_revision:
                job = self.update_model(job.pk, scm_revision=job_revision)
            # Project update does not copy the folder, so copy here
            RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)

        if job.inventory.kind == 'smart':
            # cache smart inventory memberships so that the host_filter query is not
            # ran inside of the event saving code
            update_smart_memberships_for_inventory(job.inventory)

    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
        """Persist fact cache results and refresh inventory computed fields."""
        super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
        if not private_data_dir:
            # If there's no private data dir, that means we didn't get into the
            # actual `run()` call; this _usually_ means something failed in
            # the pre_run_hook method
            return
        if job.use_fact_cache:
            # NOTE(review): start_job_fact_cache writes under
            # artifacts/<job id>/fact_cache while this reads
            # artifacts/fact_cache — confirm which layout the runner produces.
            job.finish_job_fact_cache(
                os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
                fact_modification_times,
            )
        try:
            inventory = job.inventory
        except Inventory.DoesNotExist:
            pass
        else:
            if inventory is not None:
                update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
    def _build_scm_url_extra_vars(self, project_update):
        """
        Helper method to build SCM url and extra vars with parameters needed
        for authentication.

        Returns a (scm_url, extra_vars) tuple; credentials that must be given
        to the playbook rather than embedded in the URL are placed in
        extra_vars.
        """
        extra_vars = {}
        if project_update.credential:
            scm_username = project_update.credential.get_input('username', default='')
            scm_password = project_update.credential.get_input('password', default='')
        else:
            scm_username = ''
            scm_password = ''
        scm_type = project_update.scm_type
        scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
        scm_url_parts = urlparse.urlsplit(scm_url)
        # Prefer the username/password in the URL, if provided.
        scm_username = scm_url_parts.username or scm_username
        scm_password = scm_url_parts.password or scm_password
        if scm_username:
            if scm_type == 'svn':
                # svn gets its credentials through extra vars instead of the URL.
                extra_vars['scm_username'] = scm_username
                extra_vars['scm_password'] = scm_password
                # NOTE(review): False appears to tell update_scm_url to drop the
                # component from the rebuilt URL — confirm against its docs.
                scm_password = False
                if scm_url_parts.scheme != 'svn+ssh':
                    scm_username = False
            elif scm_url_parts.scheme.endswith('ssh'):
                # ssh-based URLs authenticate with keys; never embed a password.
                scm_password = False
            elif scm_type in ('insights', 'archive'):
                extra_vars['scm_username'] = scm_username
                extra_vars['scm_password'] = scm_password
            scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
        else:
            scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
        # Pass the extra accept_hostkey parameter to the git module.
        if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
            extra_vars['scm_accept_hostkey'] = 'true'
        return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
    def build_extra_vars_file(self, project_update, private_data_dir):
        """
        Write the extra vars consumed by the project_update.yml playbook
        (SCM url/branch, galaxy sync toggles, destination paths) into the
        private data dir.
        """
        extra_vars = {}
        scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
        extra_vars.update(extra_vars_new)
        scm_branch = project_update.scm_branch
        if project_update.job_type == 'run' and (not project_update.branch_override):
            # Job-driven sync without branch override: pin to the project's
            # already-known revision when one exists.
            if project_update.project.scm_revision:
                scm_branch = project_update.project.scm_revision
            elif not scm_branch:
                raise RuntimeError('Could not determine a revision to run from project.')
        elif not scm_branch:
            scm_branch = 'HEAD'
        galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
        if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
            logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')
        extra_vars.update(
            {
                'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
                'local_path': os.path.basename(project_update.project.local_path),
                'project_path': project_update.get_project_path(check_if_exists=False),  # deprecated
                'insights_url': settings.INSIGHTS_URL_BASE,
                'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
                'awx_version': get_awx_version(),
                'scm_url': scm_url,
                'scm_branch': scm_branch,
                'scm_clean': project_update.scm_clean,
                'scm_track_submodules': project_update.scm_track_submodules,
                # Galaxy installs only run when credentials are configured.
                'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
                'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
            }
        )
        # apply custom refspec from user for PR refs and the like
        if project_update.scm_refspec:
            extra_vars['scm_refspec'] = project_update.scm_refspec
        elif project_update.project.allow_override:
            # If branch is override-able, do extra fetch for all branches
            extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
        if project_update.scm_type == 'archive':
            # for raw archive, prevent error moving files between volumes
            extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
        self._write_extra_vars_file(private_data_dir, extra_vars)
def build_cwd(self, project_update, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
    def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
        """
        Run inventory updates, serially, for SCM inventory sources that follow
        this project, skipping sources already at the synced revision.
        """
        scm_revision = project_update.project.scm_revision
        inv_update_class = InventoryUpdate._get_task_class()
        for inv_src in dependent_inventory_sources:
            if not inv_src.update_on_project_update:
                continue
            if inv_src.scm_last_revision == scm_revision:
                logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
                continue
            logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
            with transaction.atomic():
                # Re-check for a concurrent update inside the transaction to
                # avoid racing another node.
                if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                    logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                    continue
                local_inv_update = inv_src.create_inventory_update(
                    _eager_fields=dict(
                        launch_type='scm',
                        status='running',
                        instance_group=project_update.instance_group,
                        execution_node=project_update.execution_node,
                        source_project_update=project_update,
                        celery_task_id=project_update.celery_task_id,
                    )
                )
            try:
                inv_update_class().run(local_inv_update.id)
            except Exception:
                logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
            try:
                project_update.refresh_from_db()
            except ProjectUpdate.DoesNotExist:
                logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
                break
            try:
                local_inv_update.refresh_from_db()
            except InventoryUpdate.DoesNotExist:
                logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
                continue
            if project_update.cancel_flag:
                logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
                break
            if local_inv_update.cancel_flag:
                logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
            if local_inv_update.status == 'successful':
                # Record the revision so the next project update can skip this source.
                inv_src.scm_last_revision = scm_revision
                inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
'''
Note: We don't support blocking=False
'''

def acquire_lock(self, instance, blocking=True):
    """Take an exclusive flock on the project's lock file.

    Polls once per second (non-blocking lock attempts) so that the
    ProjectUpdate's cancel_flag can be honored while waiting; returns
    early — without the lock — if the update was canceled. The opened
    descriptor is stored on ``self.lock_fd`` for ``release_lock``.

    :param object instance: the ProjectUpdate being run
    :param bool blocking: unsupported; acquisition is always blocking
    :raises RuntimeError: if no lock file path can be determined
    :raises OSError: if the lock file cannot be opened
    :raises IOError: on unexpected errors while locking
    """
    lock_path = instance.get_lock_file()
    if lock_path is None:
        # If from migration or someone blanked local_path for any other reason, recoverable by save
        instance.save()
        lock_path = instance.get_lock_file()
        if lock_path is None:
            raise RuntimeError(u'Invalid lock file path')
    try:
        self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
    except OSError as e:
        logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
        raise
    start_time = time.time()
    while True:
        try:
            # Re-read cancel_flag on each pass so a cancel issued during the
            # wait is seen promptly
            instance.refresh_from_db(fields=['cancel_flag'])
            if instance.cancel_flag:
                logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
                return
            fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as e:
            # EAGAIN/EACCES simply mean another process holds the lock; retry
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                os.close(self.lock_fd)
                logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                raise
            else:
                time.sleep(1.0)
    waiting_time = time.time() - start_time
    if waiting_time > 1.0:
        logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
    """Prepare the filesystem before a project update runs.

    Ensures the projects root and project folder exist, takes the project
    lock, records the repo's current branch/commit when branch_override is
    in effect (so post_run_hook can restore it), resets the content staging
    directory, and copies the AWX project-update playbooks into the runner
    project folder.
    """
    super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
    # re-create root project folder if a natural disaster has destroyed it
    if not os.path.exists(settings.PROJECTS_ROOT):
        os.mkdir(settings.PROJECTS_ROOT)
    project_path = instance.project.get_project_path(check_if_exists=False)
    if not os.path.exists(project_path):
        os.makedirs(project_path)  # used as container mount
    self.acquire_lock(instance)
    self.original_branch = None
    if instance.scm_type == 'git' and instance.branch_override:
        if os.path.exists(project_path):
            git_repo = git.Repo(project_path)
            # a detached HEAD has no active_branch; remember the commit instead
            if git_repo.head.is_detached:
                self.original_branch = git_repo.head.commit
            else:
                self.original_branch = git_repo.active_branch
    stage_path = os.path.join(instance.get_cache_path(), 'stage')
    if os.path.exists(stage_path):
        logger.warning('{0} unexpectedly existed before update'.format(stage_path))
        shutil.rmtree(stage_path)
    os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections
    # the project update playbook is not in a git repo, but uses a vendoring directory
    # to be consistent with the ansible-runner model,
    # that is moved into the runner project folder here
    awx_playbooks = self.get_path_to('..', 'playbooks')
    copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
    """Copy project content (roles and collections) to a job private_data_dir

    :param object p: Either a project or a project update
    :param str job_private_data_dir: The root of the target ansible-runner folder
    :param str scm_revision: For branch_override cases, the git revision to copy
    """
    project_path = p.get_project_path(check_if_exists=False)
    destination_folder = os.path.join(job_private_data_dir, 'project')
    if not scm_revision:
        scm_revision = p.scm_revision
    if p.scm_type == 'git':
        git_repo = git.Repo(project_path)
        if not os.path.exists(destination_folder):
            os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        # throwaway branch name so the shallow clone can target an exact rev
        tmp_branch_name = 'awx_internal/{}'.format(uuid4())
        # always clone based on specific job revision
        if not p.scm_revision:
            raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
        # NOTE(review): create_head uses p.scm_revision, not the local
        # scm_revision computed above — the scm_revision parameter appears
        # to be ignored here; confirm intended behavior for branch_override
        source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
        # git clone must take file:// syntax for source repo or else options like depth will be ignored
        source_as_uri = Path(project_path).as_uri()
        git.Repo.clone_from(
            source_as_uri,
            destination_folder,
            branch=source_branch,
            depth=1,
            single_branch=True,  # shallow, do not copy full history
        )
        # submodules copied in loop because shallow copies from local HEADs are ideal
        # and no git clone submodule options are compatible with minimum requirements
        for submodule in git_repo.submodules:
            subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
            subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
            subrepo_uri = Path(subrepo_path).as_uri()
            git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
        # force option is necessary because remote refs are not counted, although no information is lost
        git_repo.delete_head(tmp_branch_name, force=True)
    else:
        copy_tree(project_path, destination_folder, preserve_symlinks=1)
    # copy over the roles and collection cache to job folder
    cache_path = os.path.join(p.get_cache_path(), p.cache_id)
    subfolders = []
    if settings.AWX_COLLECTIONS_ENABLED:
        subfolders.append('requirements_collections')
    if settings.AWX_ROLES_ENABLED:
        subfolders.append('requirements_roles')
    for subfolder in subfolders:
        cache_subpath = os.path.join(cache_path, subfolder)
        if os.path.exists(cache_subpath):
            dest_subpath = os.path.join(job_private_data_dir, subfolder)
            copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
            logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
    """Finalize a project update.

    Persists the new SCM revision, rotates the roles/collections content
    cache, copies project content for a waiting job, restores the git
    branch recorded in pre_run_hook, always releases the project lock,
    then updates the parent Project and any dependent SCM inventory
    sources.
    """
    super(RunProjectUpdate, self).post_run_hook(instance, status)
    # To avoid hangs, very important to release lock even if errors happen here
    try:
        if self.playbook_new_revision:
            instance.scm_revision = self.playbook_new_revision
            instance.save(update_fields=['scm_revision'])
        # Roles and collection folders copy to durable cache
        base_path = instance.get_cache_path()
        stage_path = os.path.join(base_path, 'stage')
        # only promote the staged content when install tags actually ran
        if status == 'successful' and 'install_' in instance.job_tags:
            # Clear other caches before saving this one, and if branch is overridden
            # do not clear cache for main branch, but do clear it for other branches
            self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
            cache_path = os.path.join(base_path, instance.cache_id)
            if os.path.exists(stage_path):
                if os.path.exists(cache_path):
                    logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
                    shutil.rmtree(cache_path)
                os.rename(stage_path, cache_path)
                logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
        elif os.path.exists(stage_path):
            shutil.rmtree(stage_path)  # cannot trust content update produced
        if self.job_private_data_dir:
            if status == 'successful':
                # copy project folder before resetting to default branch
                # because some git-tree-specific resources (like submodules) might matter
                self.make_local_copy(instance, self.job_private_data_dir)
                if self.original_branch:
                    # for git project syncs, non-default branches can be problems
                    # restore to branch the repo was on before this run
                    try:
                        self.original_branch.checkout()
                    except Exception:
                        # this could have failed due to dirty tree, but difficult to predict all cases
                        logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
    finally:
        self.release_lock(instance)
    p = instance.project
    if instance.job_type == 'check' and status not in (
        'failed',
        'canceled',
    ):
        if self.playbook_new_revision:
            p.scm_revision = self.playbook_new_revision
        else:
            if status == 'successful':
                logger.error("{} Could not find scm revision in check".format(instance.log_format))
        p.playbook_files = p.playbooks
        p.inventory_files = p.inventories
        p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
    # Update any inventories that depend on this project
    dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
    if len(dependent_inventory_sources) > 0:
        if status == 'successful' and instance.launch_type != 'sync':
            self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
    """Return EE parameters for a project update.

    On Kubernetes no local mounts apply, so an empty dict is returned;
    otherwise the project folder and its content cache are bind-mounted
    into the execution container at their host paths.
    """
    if settings.IS_K8S:
        return {}
    params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
    project_path = instance.get_project_path(check_if_exists=False)
    cache_path = instance.get_cache_path()
    mounts = params.setdefault('container_volume_mounts', [])
    mounts.append(f"{project_path}:{project_path}:Z")
    mounts.append(f"{cache_path}:{cache_path}:Z")
    return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
    # Task that runs ansible-inventory for an InventoryUpdate and imports
    # the resulting JSON into the database.

    model = InventoryUpdate
    event_model = InventoryUpdateEvent
    event_data_key = 'inventory_update_id'

    def build_private_data(self, inventory_update, private_data_dir):
        """
        Return private data needed for inventory update.

        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
            }
        }

        If no private data is needed, return None.
        """
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()
            return injector.build_private_data(inventory_update, private_data_dir)

    def build_env(self, inventory_update, private_data_dir, private_data_files=None):
        """Build environment dictionary for ansible-inventory.

        Most environment variables related to credentials or configuration
        are accomplished by the inventory source injectors (in this method)
        or custom credential type injectors (in main run method).
        """
        env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
        if private_data_files is None:
            private_data_files = {}
        # Pass inventory source ID to inventory script.
        env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
        env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
        env.update(STANDARD_INVENTORY_UPDATE_ENV)
        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()
        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
        if inventory_update.source == 'scm':
            # pass source_vars through as env vars, except blocked names
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
                    env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'file':
            raise NotImplementedError('Cannot update file sources through the task system.')
        if inventory_update.source == 'scm' and inventory_update.source_project_update:
            # prepend the project's vendored collections to the search path,
            # preserving any paths from the env or the project's ansible.cfg
            env_key = 'ANSIBLE_COLLECTIONS_PATHS'
            config_setting = 'collections_paths'
            folder = 'requirements_collections'
            default = '~/.ansible/collections:/usr/share/ansible/collections'
            config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
            paths = default.split(':')
            if env_key in env:
                for path in env[env_key].split(':'):
                    if path not in paths:
                        paths = [env[env_key]] + paths
            elif config_setting in config_values:
                for path in config_values[config_setting].split(':'):
                    if path not in paths:
                        paths = [config_values[config_setting]] + paths
            paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
            env[env_key] = os.pathsep.join(paths)
        return env

    def write_args_file(self, private_data_dir, args):
        """Write the command line to the runner 'args' file, created
        owner-read/write and left owner-read-only; returns the file path."""
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_args(self, inventory_update, private_data_dir, passwords):
        """Build the command line argument list for running an inventory
        import.
        """
        # Get the inventory source and inventory.
        inventory_source = inventory_update.inventory_source
        inventory = inventory_source.inventory
        if inventory is None:
            raise RuntimeError('Inventory Source is not associated with an Inventory.')
        args = ['ansible-inventory', '--list', '--export']
        # Add arguments for the source inventory file/script/thing
        rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
        container_location = os.path.join(CONTAINER_ROOT, rel_path)
        source_location = os.path.join(private_data_dir, rel_path)
        args.append('-i')
        args.append(container_location)
        # results land in the artifacts dir; post_run_hook reads this file
        args.append('--output')
        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
        if os.path.isdir(source_location):
            playbook_dir = container_location
        else:
            playbook_dir = os.path.dirname(container_location)
        args.extend(['--playbook-dir', playbook_dir])
        if inventory_update.verbosity:
            # map our verbosity to a higher ansible-inventory -v level (max 5)
            args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
        return args

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory

    def pseudo_build_inventory(self, inventory_update, private_data_dir):
        """Inventory imports are ran through a management command
        we pass the inventory in args to that command, so this is not considered
        to be "Ansible" inventory (by runner) even though it is
        Eventually, we would like to cut out the management command,
        and thus use this as the real inventory

        Returns the inventory file path relative to private_data_dir.
        """
        src = inventory_update.source
        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[src]()
        # NOTE(review): if src is neither injector-backed nor 'scm',
        # rel_path is never assigned and this raises UnboundLocalError;
        # presumably sources are validated upstream — confirm
        if injector is not None:
            content = injector.inventory_contents(inventory_update, private_data_dir)
            # must be a statically named file
            inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
            with open(inventory_path, 'w') as f:
                f.write(content)
            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            rel_path = os.path.join('inventory', injector.filename)
        elif src == 'scm':
            rel_path = os.path.join('project', inventory_update.source_path)
        return rel_path

    def build_cwd(self, inventory_update, private_data_dir):
        """
        There is one case where the inventory "source" is in a different
        location from the private data:
        - SCM, where source needs to live in the project folder
        """
        src = inventory_update.source
        if src == 'scm' and inventory_update.source_project_update:
            return os.path.join(CONTAINER_ROOT, 'project')
        return CONTAINER_ROOT

    def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
        # inventory updates run no playbook
        return None

    def build_credentials_list(self, inventory_update):
        # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()

    def pre_run_hook(self, inventory_update, private_data_dir):
        """For SCM-sourced inventories, run a blocking project sync (or copy
        already-updated project content) before the inventory update."""
        super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
        source_project = None
        if inventory_update.inventory_source:
            source_project = inventory_update.inventory_source.source_project
        if (
            inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
        ):  # never ever update manual projects
            # Check if the content cache exists, so that we do not unnecessarily re-download roles
            sync_needs = ['update_{}'.format(source_project.scm_type)]
            has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
            # Galaxy requirements are not supported for manual projects
            if not has_cache:
                sync_needs.extend(['install_roles', 'install_collections'])
            local_project_sync = source_project.create_project_update(
                _eager_fields=dict(
                    launch_type="sync",
                    job_type='run',
                    job_tags=','.join(sync_needs),
                    status='running',
                    execution_node=inventory_update.execution_node,
                    instance_group=inventory_update.instance_group,
                    celery_task_id=inventory_update.celery_task_id,
                )
            )
            # associate the inventory update before calling run() so that a
            # cancel() call on the inventory update can cancel the project update
            local_project_sync.scm_inventory_updates.add(inventory_update)
            project_update_task = local_project_sync._get_task_class()
            try:
                # run the sync synchronously in this process
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
                inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
            except Exception:
                # mark the inventory update failed with a pointer to the sync
                inventory_update = self.update_model(
                    inventory_update.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
                raise
        elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
            # This follows update, not sync, so make copy here
            RunProjectUpdate.make_local_copy(source_project, private_data_dir)

    def post_run_hook(self, inventory_update, status):
        """On success, parse the ansible-inventory output artifact and import
        it into the database via the inventory_import management command."""
        super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
        if status != 'successful':
            return  # nothing to save, step out of the way to allow error reporting
        private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
        expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
        with open(expected_output) as f:
            data = json.load(f)
        # build inventory save options
        options = dict(
            overwrite=inventory_update.overwrite,
            overwrite_vars=inventory_update.overwrite_vars,
        )
        src = inventory_update.source
        if inventory_update.enabled_var:
            options['enabled_var'] = inventory_update.enabled_var
            options['enabled_value'] = inventory_update.enabled_value
        else:
            if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
                options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
            if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
                options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
        if inventory_update.host_filter:
            options['host_filter'] = inventory_update.host_filter
        # NOTE(review): unlike the sibling lookups, this getattr has no
        # default, so a missing <SRC>_EXCLUDE_EMPTY_GROUPS setting would
        # raise AttributeError — confirm all sources define it
        if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
            options['exclude_empty_groups'] = True
        if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
            options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
        # Verbosity is applied to saving process, as well as ansible-inventory CLI option
        if inventory_update.verbosity:
            options['verbosity'] = inventory_update.verbosity
        handler = SpecialInventoryHandler(
            self.event_handler,
            self.cancel_callback,
            verbosity=inventory_update.verbosity,
            job_timeout=self.get_instance_timeout(self.instance),
            start_time=inventory_update.started,
            counter=self.event_ct,
            initial_line=self.end_line,
        )
        inv_logger = logging.getLogger('awx.main.commands.inventory_import')
        formatter = inv_logger.handlers[0].formatter
        formatter.job_start = inventory_update.started
        handler.formatter = formatter
        # route the import command's log output through the job event handler
        inv_logger.handlers[0] = handler
        from awx.main.management.commands.inventory_import import Command as InventoryImportCommand

        cmd = InventoryImportCommand()
        try:
            # save the inventory data to database.
            # canceling exceptions will be handled in the global post_run_hook
            cmd.perform_update(options, data, inventory_update)
        except PermissionDenied as exc:
            logger.exception('License error saving {} content'.format(inventory_update.log_format))
            raise PostRunError(str(exc), status='error')
        except PostRunError:
            logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
            raise
        except Exception:
            logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
            raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
    """

    model = AdHocCommand
    event_model = AdHocCommandEvent
    event_data_key = 'ad_hoc_command_id'

    def build_private_data(self, ad_hoc_command, private_data_dir):
        """
        Return SSH private key data needed for this ad hoc command (only if
        stored in DB as ssh_key_data).

        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        """
        # If we were sent SSH credentials, decrypt them and send them
        # back (they will be written to a temporary file).
        creds = ad_hoc_command.credential
        private_data = {'credentials': {}}
        if creds and creds.has_input('ssh_key_data'):
            private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
        if creds and creds.has_input('ssh_public_key_data'):
            private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
        return private_data

    def build_passwords(self, ad_hoc_command, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user and
        sudo/su.
        """
        passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
        cred = ad_hoc_command.credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
                # 'ssh_password' is stored on the credential under 'password'
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value
        return passwords

    def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible.
        """
        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
        # Set environment variables needed for inventory and ad hoc event
        # callbacks to work.
        env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
        env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
        env['INVENTORY_HOSTVARS'] = str(True)
        env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
        env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
        return env

    def build_args(self, ad_hoc_command, private_data_dir, passwords):
        """
        Build command line argument list for running ansible, optionally using
        ssh-agent for public/private key authentication.
        """
        creds = ad_hoc_command.credential
        ssh_username, become_username, become_method = '', '', ''
        if creds:
            ssh_username = creds.get_input('username', default='')
            become_method = creds.get_input('become_method', default='')
            become_username = creds.get_input('become_username', default='')
        else:
            become_method = None
            become_username = ""
        # Always specify the normal SSH user as root by default. Since this
        # task is normally running in the background under a service account,
        # it doesn't make sense to rely on ansible's default of using the
        # current user.
        ssh_username = ssh_username or 'root'
        args = []
        if ad_hoc_command.job_type == 'check':
            args.append('--check')
        args.extend(['-u', sanitize_jinja(ssh_username)])
        if 'ssh_password' in passwords:
            args.append('--ask-pass')
        # We only specify sudo/su user and password if explicitly given by the
        # credential. Credential should never specify both sudo and su.
        if ad_hoc_command.become_enabled:
            args.append('--become')
        if become_method:
            args.extend(['--become-method', sanitize_jinja(become_method)])
        if become_username:
            args.extend(['--become-user', sanitize_jinja(become_username)])
        if 'become_password' in passwords:
            args.append('--ask-become-pass')
        if ad_hoc_command.forks:  # FIXME: Max limit?
            args.append('--forks=%d' % ad_hoc_command.forks)
        if ad_hoc_command.diff_mode:
            args.append('--diff')
        if ad_hoc_command.verbosity:
            args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
        # extra_vars are validated here (prohibited vars raise ValueError);
        # the values themselves are written out by build_extra_vars_file
        extra_vars = ad_hoc_command.awx_meta_vars()
        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        if ad_hoc_command.limit:
            args.append(ad_hoc_command.limit)
        else:
            args.append('all')
        return args

    def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
        """Write AWX meta vars plus the command's extra vars (after
        prohibited-variable validation) to the runner env folder."""
        extra_vars = ad_hoc_command.awx_meta_vars()
        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        self._write_extra_vars_file(private_data_dir, extra_vars)

    def build_module_name(self, ad_hoc_command):
        """Return the Ansible module to run."""
        return ad_hoc_command.module_name

    def build_module_args(self, ad_hoc_command):
        """Return the module arguments, Jinja-sanitized unless the settings
        explicitly always allow Jinja in extra vars."""
        module_args = ad_hoc_command.module_args
        if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
            module_args = sanitize_jinja(module_args)
        return module_args

    def build_cwd(self, ad_hoc_command, private_data_dir):
        """Ad hoc commands run from the private data dir itself."""
        return private_data_dir

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # ad hoc commands run no playbook
        return None

    def get_password_prompts(self, passwords={}):
        """Return the prompt-regex -> password-name map for expect handling.

        Note: the ``passwords`` argument is not read here; the shared mutable
        default is therefore harmless.
        """
        d = super(RunAdHocCommand, self).get_password_prompts()
        d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
        d[r'Bad passphrase, try again for .*:\s*?$'] = ''
        for method in PRIVILEGE_ESCALATION_METHODS:
            d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
            d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
        d[r'BECOME password.*:\s*?$'] = 'become_password'
        d[r'SSH password:\s*?$'] = 'ssh_password'
        d[r'Password:\s*?$'] = 'ssh_password'
        return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
    # Task that runs awx-manage management commands (e.g. cleanup_jobs,
    # cleanup_activitystream) for a SystemJob.

    model = SystemJob
    event_model = SystemJobEvent
    event_data_key = 'system_job_id'

    def build_execution_environment_params(self, system_job, private_data_dir):
        # system jobs run locally via awx-manage; no EE/container params
        return {}

    def build_args(self, system_job, private_data_dir, passwords):
        """Build the awx-manage command line for this system job.

        extra_vars must be blank or valid JSON; on parse failure the error
        is logged and the base command is returned without extra options.
        """
        args = ['awx-manage', system_job.job_type]
        try:
            # System Job extra_vars can be blank, must be JSON if not blank
            if system_job.extra_vars == '':
                json_vars = {}
            else:
                json_vars = json.loads(system_job.extra_vars)
            if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
                if 'days' in json_vars:
                    args.extend(['--days', str(json_vars.get('days', 60))])
                if 'dry_run' in json_vars and json_vars['dry_run']:
                    args.extend(['--dry-run'])
            if system_job.job_type == 'cleanup_jobs':
                args.extend(
                    ['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
                )
        except Exception:
            logger.exception("{} Failed to parse system job".format(system_job.log_format))
        return args

    def write_args_file(self, private_data_dir, args):
        """Write the command line to the runner 'args' file, created
        owner-read/write and left owner-read-only; returns the file path."""
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_env(self, instance, private_data_dir, private_data_files=None):
        """Build env for awx-manage: inherit the full process environment,
        overlaid with the standard task environment."""
        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation
        # the goal is to run it a container instead
        env = dict(os.environ.items())
        env.update(base_env)
        return env

    def build_cwd(self, instance, private_data_dir):
        # awx-manage must run from the AWX source tree
        return settings.BASE_DIR

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # system jobs run no playbook
        return None

    def build_inventory(self, instance, private_data_dir):
        # system jobs use no inventory
        return None
def _reconstruct_relationships(copy_mapping):
    """Restore FK and M2M relationships on newly deep-copied objects.

    For each (old, new) pair, every field named in the model's
    FIELDS_TO_PRESERVE_AT_COPY is re-pointed at the copied counterpart of
    the related object when one exists in *copy_mapping*, otherwise at the
    original related object. Each new object is saved afterward.

    :param dict copy_mapping: maps original instances to their copies
    """
    for old_obj, new_obj in copy_mapping.items():
        model = type(old_obj)
        for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
            field = model._meta.get_field(field_name)
            if isinstance(field, ForeignKey):
                if getattr(new_obj, field_name, None):
                    # already set during the copy itself; leave it alone
                    continue
                related_obj = getattr(old_obj, field_name)
                # prefer the copied counterpart of the related object
                related_obj = copy_mapping.get(related_obj, related_obj)
                setattr(new_obj, field_name, related_obj)
            elif field.many_to_many:
                for related_obj in getattr(old_obj, field_name).all():
                    logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
                    getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
        new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    """Background task that deep-copies sub-objects of a copied model instance.

    The list of sub-objects to copy is read from the cache under *uuid*
    (placed there by the originating API view). FK/M2M relationships are
    restored via _reconstruct_relationships, and *permission_check_func* —
    given as a (module, class, method) triple — is resolved and called to
    check the creating user's permissions on the copies.
    """
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return
    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    # imported here to avoid circular imports at module load time
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
        return
    try:
        obj = model.objects.get(pk=obj_pk)
        new_obj = model.objects.get(pk=new_obj_pk)
        creater = User.objects.get(pk=user_pk)
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return
    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:
            # each entry is a (module_name, model_name, pk) triple
            sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
            if sub_model is None:
                continue
            try:
                sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
            except ObjectDoesNotExist:
                continue
            copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
        _reconstruct_relationships(copy_mapping)
        if permission_check_func:
            permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
            permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
    """Thread that captures any exception raised by its target.

    After the thread finishes, ``self.exc`` is None on success, or a
    ``(type, value, traceback)`` triple the parent thread can re-raise.
    """

    def run(self):
        self.exc = None
        try:
            super().run()
        except Exception as e:
            # same triple sys.exc_info() would yield inside the handler
            self.exc = (type(e), e, e.__traceback__)
class AWXReceptorJob:
def __init__(self, task=None, runner_params=None):
    """
    :param task: the task object driving this job, or None
    :param dict runner_params: parameters destined for ansible-runner;
        expected to contain 'private_data_dir' and 'settings' keys when a
        non-container-group task is given
    """
    self.task = task
    self.runner_params = runner_params
    # receptor work unit id; set once work is submitted, used for release
    self.unit_id = None
    # container-group tasks get their execution environment configured
    # elsewhere; for local execution merge EE params into runner settings
    if self.task and not self.task.instance.is_container_group_task:
        execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
        self.runner_params['settings'].update(execution_environment_params)
def run(self):
    """Open a control connection to the local Receptor socket, execute the
    work, and — when configured — always release the work unit afterward,
    even if execution raised.
    """
    ctl = ReceptorControl('/var/run/receptor/receptor.sock')
    try:
        return self._run_internal(ctl)
    finally:
        # Make sure to always release the work unit if we established it
        if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
            ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
transmitter_thread.join()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# Which exits if the job has finished normally. The context manager ensures we do not
# leave any threads laying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
state_name = receptor_work_list[self.unit_id]['StateName']
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occured at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
raise RuntimeError(detail)
return res
# Spawned in a thread so Receptor can start reading before we finish writing, we
# write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
self.runner_params['only_transmit_kwargs'] = True
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task:
ee = self.task.instance.resolve_execution_environment()
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
common.py | import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
# --- Timeouts (seconds) used by the polling helpers below ---
DEFAULT_TIMEOUT = 120
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 240
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
# --- Rancher server endpoints and credentials (from the environment) ---
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
    CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
# Path where generated kubeconfigs are written (see create_kubeconfig).
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          "k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
# Windows nodes converge more slowly; stretch the default wait.
if TEST_OS == "windows":
    DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
    TEST_OS == "windows",
    reason='Tests Skipped for including Windows nodes cluster')
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
    ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "rancher_env.config")
# --- AWS node-driver / S3 provisioning inputs ---
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
# --- Feature toggles driving pytest skip markers ---
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
                                  reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
    os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
    pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
                       reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
# Shared mutable state populated by RBAC test fixtures.
rbac_data = {
    "project": None,
    "namespace": None,
    "workload": None,
    "p_unshared": None,
    "ns_unshared": None,
    "wl_unshared": None,
    "users": {
        CLUSTER_OWNER: {},
        CLUSTER_MEMBER: {},
        PROJECT_OWNER: {},
        PROJECT_MEMBER: {},
        PROJECT_READ_ONLY: {},
    }
}
auth_rbac_data = {
    "project": None,
    "namespace": None,
    "users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
    "newUserDefault": "false",
    "rules": [
        {
            "type": "/v3/schemas/policyRule",
            "apiGroups": [
                "management.cattle.io"
            ],
            "verbs": [
                "*"
            ],
            "resources": [
                "catalogs",
                "templates",
                "templateversions"
            ]
        }
    ],
    "name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
    "newUserDefault": "false",
    "rules": [
        {
            "type": "/v3/schemas/policyRule",
            "apiGroups": [
                "management.cattle.io"
            ],
            "verbs": [
                "get",
                "list",
                "watch"
            ],
            "resources": [
                "clusters"
            ]
        }
    ],
    "name": "gr-test-list-cluster",
}
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
    pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
                "activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
    os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
    CATTLE_TEST_URL + "/v3-public/" \
    + AUTH_PROVIDER + "Providers/" \
    + AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
    "auth_info": None,
    "users": None,
    "group_dic": None,
    "groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
    auth_requirements,
    reason='Group RBAC tests are skipped.'
           'Required AUTH env variables '
           'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
                                         test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
                                     reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
    """Return True when *os_type* (default: the OS under test) is Windows."""
    return os_type == "windows"
def random_str():
    """Return a unique-ish string built from a random number and the clock."""
    return f'random-{random_num()}-{int(time.time())}'
def random_num():
    """Return a random integer in [0, 1000000]."""
    return random.randint(0, 10 ** 6)
def random_int(start, end):
    """Return a uniform random integer in the inclusive range [start, end]."""
    return random.randint(start, end)
def random_test_name(name="test"):
    """Return *name* with a random 5-digit suffix, e.g. 'test-12345'."""
    return "{0}-{1}".format(name, random_int(10000, 99999))
def get_admin_client():
    """Return a Rancher API client authenticated with the admin token."""
    return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
    """Return a Rancher API client authenticated with the standard-user token."""
    return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
    """Return a Rancher API client for an arbitrary bearer token."""
    kwargs = dict(url=url, token=token, verify=False)
    return rancher.Client(**kwargs)
def get_project_client_for_token(project, token):
    """Return a Rancher client scoped to *project*'s schema endpoint."""
    schema_url = project.links['self'] + '/schemas'
    return rancher.Client(url=schema_url, token=token, verify=False)
def get_cluster_client_for_token(cluster, token):
    """Return a Rancher client scoped to *cluster*'s schema endpoint."""
    schema_url = cluster.links['self'] + '/schemas'
    return rancher.Client(url=schema_url, token=token, verify=False)
def up(cluster, token):
    """Return a cluster-scoped Rancher client.

    Same behavior as get_cluster_client_for_token; kept for existing callers.
    """
    c_client = rancher.Client(url=cluster.links['self'] + '/schemas',
                              token=token, verify=False)
    return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
    """Block until *obj* reaches *state*, then return a fresh reload of it."""
    def _reached():
        return client.reload(obj).state == state
    wait_for(_reached, timeout)
    return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=DEFAULT_TIMEOUT):
    """Reload *resource* every 0.5s until check_function(resource) is truthy.

    Raises Exception on timeout; the message embeds the predicate's source
    and, if given, the output of fail_handler(resource).
    """
    deadline = time.time() + timeout
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() > deadline:
            msg = ('Timeout waiting for ' + resource.baseType +
                   ' to satisfy condition: ' +
                   inspect.getsource(check_function))
            if fail_handler:
                msg = msg + fail_handler(resource)
            raise Exception(msg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
    """Poll *callback* every 0.5s until it returns something other than
    None/False; return that value or raise Exception on timeout."""
    deadline = time.time() + timeout
    ret = callback()
    while ret is None or ret is False:
        time.sleep(.5)
        if time.time() > deadline:
            raise Exception(timeout_message or 'Timeout waiting for condition')
        ret = callback()
    return ret
def random_name():
    """Return a name like 'test-12345' for ad-hoc resources."""
    return "test-" + str(random_int(10000, 99999))
def get_setting_value_by_name(name):
    """Fetch a Rancher server setting's value via the admin API."""
    response = requests.get(CATTLE_API_URL + "/settings/" + name,
                            verify=False,
                            headers={'Authorization': 'Bearer ' + ADMIN_TOKEN})
    return response.json()["value"]
def compare_versions(v1, v2):
    """Compare two dotted numeric version strings.

    Returns a negative number if v1 < v2, zero if v1 == v2 and a positive
    number if v1 > v2.  Comparison is numeric per component ("1.10" > "1.2").
    """
    # Parse each version ONCE instead of re-splitting for every comparison
    # (the original rebuilt both tuples up to four times per call).
    t1 = tuple(int(part) for part in v1.split("."))
    t2 = tuple(int(part) for part in v2.split("."))
    if t1 > t2:
        return 1
    if t1 < t2:
        return -1
    return 0
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
    """Create a project in *cluster* plus a namespace inside it; return both."""
    server_url = cluster.links['self'].split("/clusters")[0]
    server_client = get_client_for_token(token, server_url)
    project = create_project(server_client, cluster, project_name)
    cluster_client = get_cluster_client_for_token(cluster, token)
    namespace = create_ns(cluster_client, cluster, project, ns_name)
    return project, namespace
def create_project(client, cluster, project_name=None):
    """Create a project (random name if none given) and wait until active."""
    if project_name is None:
        project_name = random_name()
    project = client.create_project(name=project_name,
                                    clusterId=cluster.id)
    # Give the backend a moment before polling for availability.
    time.sleep(5)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project
def create_project_with_pspt(client, cluster, pspt):
    """Create a project and bind the given pod security policy template."""
    project = client.create_project(name=random_name(), clusterId=cluster.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return set_pspt_for_project(project, client, pspt)
def set_pspt_for_project(project, client, pspt):
    """Bind *pspt* to *project*, wait for it to settle, and return the project."""
    project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project
def create_ns(client, cluster, project, ns_name=None):
    """Create a namespace in *project* (random name if none) and wait until active."""
    if ns_name is None:
        ns_name = random_name()
    ns = client.create_namespace(name=ns_name,
                                 clusterId=cluster.id,
                                 projectId=project.id)
    wait_for_ns_to_become_active(client, ns)
    # Reload so the returned object reflects the final state.
    ns = client.reload(ns)
    assert ns.state == 'active'
    return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
    """Grant *user* the given role template on *cluster*; return the binding."""
    return client.create_cluster_role_template_binding(
        clusterId=cluster.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
def assign_members_to_project(client, user, project, role_template_id):
    """Grant *user* the given role template on *project*; return the binding."""
    return client.create_project_role_template_binding(
        projectId=project.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
def change_member_role_in_cluster(client, user, crtb, role_template_id):
    """Update an existing cluster binding to a new role template."""
    return client.update(crtb,
                         roleTemplateId=role_template_id,
                         userId=user.id)
def change_member_role_in_project(client, user, prtb, role_template_id):
    """Update an existing project binding to a new role template."""
    return client.update(prtb,
                         roleTemplateId=role_template_id,
                         userId=user.id)
def create_kubeconfig(cluster, file_name=kube_fname):
    """Generate *cluster*'s kubeconfig and write it to *file_name*.

    Uses a context manager so the file handle is closed even if the write
    fails (the original opened the file without `with` and would leak the
    handle on error).
    """
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    with open(file_name, "w") as file:
        file.write(generateKubeConfigOutput.config)
def validate_psp_error_worklaod(p_client, workload, error_message):
    """Assert the workload is stuck updating with a transitioning error
    whose message contains *error_message*.

    NOTE: the 'worklaod' typo in the name is preserved for callers.
    """
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
                                             ignore_pod_count=False,
                                             deployment_list=None,
                                             daemonset_list=None,
                                             cronjob_list=None):
    """Assert every workload in *ns* is expected, uses a rancher/* image,
    and passes per-type validation.

    NOTE: the three list arguments are mutated in place (validated names are
    removed) — pass copies if the caller needs them afterwards.
    """
    # None defaults avoid the shared-mutable-default-argument pitfall.
    if cronjob_list is None:
        cronjob_list = []
    if daemonset_list is None:
        daemonset_list = []
    if deployment_list is None:
        deployment_list = []
    workload_list = deployment_list + daemonset_list + cronjob_list
    wls = project_client.list_workload(namespaceId=ns.id).data
    assert len(workload_list) == len(wls), \
        "Expected {} workload(s) to be present in {} namespace " \
        "but there were {}".format(len(workload_list), ns.name, len(wls))
    for workload_name in workload_list:
        workloads = project_client.list_workload(name=workload_name,
                                                 namespaceId=ns.id).data
        assert len(workloads) == workload_list.count(workload_name), \
            "Expected {} workload(s) to be present with name {} " \
            "but there were {}".format(workload_list.count(workload_name),
                                       workload_name, len(workloads))
        for workload in workloads:
            for container in workload.containers:
                assert str(container.image).startswith("rancher/")
            if workload_name in deployment_list:
                validate_workload(project_client, workload, "deployment",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                deployment_list.remove(workload_name)
            if workload_name in daemonset_list:
                validate_workload(project_client, workload, "daemonSet",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                daemonset_list.remove(workload_name)
            if workload_name in cronjob_list:
                validate_workload(project_client, workload, "cronJob",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                cronjob_list.remove(workload_name)
    # Final assertion to ensure all expected workloads have been validated
    assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60, ignore_pod_count=False):
    """Validate that *workload* is active and its pods are running.

    ``type`` (parameter name shadows the builtin; kept for API compatibility)
    is one of "deployment", "statefulSet", "daemonSet" or "cronJob" and
    selects which kubectl status fields are checked.
    """
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronjob, wait for the first pod to get created after
    # scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    if ignore_pod_count:
        pods = p_client.list_pod(workloadId=workload.id).data
    else:
        pods = wait_for_pods_in_workload(p_client, workload, pod_count)
        assert len(pods) == pod_count
        pods = p_client.list_pod(workloadId=workload.id).data
        assert len(pods) == pod_count
    for pod in pods:
        p = wait_for_pod_to_running(p_client, pod)
        assert p["status"]["phase"] == "Running"
    # Cross-check the Rancher view against kubectl's status fields.
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == len(pods)
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == len(pods)
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    """Validate a workload that carries a sidecar: workload active, pods
    running, and every pod reporting exactly two running containers."""
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        get_pods = "get pods -l" + label + " -n " + ns_name
        execute_kubectl_cmd(get_pods)
        pods_result = execute_kubectl_cmd(get_pods)
        assert len(pods_result["items"]) == pod_count
        for pod in pods_result["items"]:
            assert pod["status"]["phase"] == "Running"
            # Main container + sidekick must both be present and running.
            assert len(pod["status"]["containerStatuses"]) == 2
            assert "running" in pod["status"]["containerStatuses"][0]["state"]
            assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
    """Assert the workload's paused flag matches *expectedstatus*."""
    actual = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert actual == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
    """Assert every pod of *workload* runs *expectedimage* in container 0."""
    for key, value in workload.workloadLabels.items():
        selector = key + "=" + value
        pods = execute_kubectl_cmd("get pods -l" + selector + " -n " + ns_name)
        for pod in pods["items"]:
            assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    """Assert every pod named in *expectedpods* still exists for *workload*."""
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        pods = execute_kubectl_cmd("get pods -l" + label + " -n " + ns_name)
        current_names = [pod["metadata"]["name"] for pod in pods["items"]]
        for expectedpod in expectedpods["items"]:
            assert expectedpod["metadata"]["name"] in current_names
def validate_workload_image(client, workload, expectedImage, ns):
    """Assert both the workload spec and its pods use *expectedImage*."""
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
                        kubeconfig=kube_fname):
    """Run *cmd* through kubectl against *kubeconfig*.

    With json_out (default) the command gets '-o json' appended and the
    output is parsed; with stderr=True the stderr-capturing runner is used.
    """
    command = 'kubectl --kubeconfig {0} {1}'.format(kubeconfig, cmd)
    if json_out:
        command += ' -o json'
    print("run cmd: \t{0}".format(command))
    runner = run_command_with_stderr if stderr else run_command
    result = runner(command, False)
    print("returns: \t{0}".format(result))
    return json.loads(result) if json_out else result
def run_command(command, log_out=True):
    """Run a shell command; return its stdout as text, or None on failure.

    The exception object was previously bound (`as e`) but never used;
    the failure is swallowed deliberately — callers treat None as
    "command failed".
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    try:
        return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError:
        return None
def run_command_with_stderr(command, log_out=True):
    """Run a shell command capturing stderr.

    Returns stdout (bytes) on success; on failure returns the command's
    stderr and prints it along with the return code.
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    returncode = 0
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as err:
        output = err.stderr
        returncode = err.returncode
    if log_out:
        print("return code: \t{0}".format(returncode))
    if returncode != 0:
        print("output: \t{0}".format(output))
    return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the workload reaches 'active'; return it."""
    deadline = time.time() + timeout
    while True:
        matches = client.list_workload(uuid=workload.uuid).data
        assert len(matches) == 1
        current = matches[0]
        if current.state == "active":
            return current
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the ingress reaches 'active'; return it."""
    deadline = time.time() + timeout
    while True:
        matches = client.list_ingress(uuid=ingress.uuid).data
        assert len(matches) == 1
        current = matches[0]
        if current.state == "active":
            return current
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    """Poll until the workload's `transitioning` field equals *state*.

    The timeout message previously said "waiting for state to get to
    active" — copy-pasted from wait_for_wl_to_active — which was misleading
    since this helper waits on `transitioning`, not the active state.
    """
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for transitioning to reach " + state)
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the pod reaches 'running'; return it."""
    deadline = time.time() + timeout
    while True:
        matches = client.list_pod(uuid=pod.uuid).data
        assert len(matches) == 1
        current = matches[0]
        if current.state == "running":
            return current
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
    """Return worker nodes of *cluster* whose OS label matches *os_type*
    (plus control-plane nodes on k3s, where the master is schedulable)."""
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker and (not node.unschedulable):
            for key, val in node.labels.items():
                # Either one of the labels should be present on the node
                if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
                    if val == os_type:
                        schedulable_nodes.append(node)
                    break
        # Including master in list of nodes as master is also schedulable
        if 'k3s' in cluster.version["gitVersion"] and node.controlPlane:
            schedulable_nodes.append(node)
    return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
    """Return all nodes in *cluster* that carry the etcd role."""
    client = client or get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    return [node for node in nodes if node.etcd]
def get_role_nodes(cluster, role, client=None):
    """Return the nodes of *cluster* holding *role*.

    *role* is one of "etcd", "control" or "worker"; any other value yields
    an empty list (matching the original behavior).
    """
    if not client:
        client = get_user_client()
    # Map the role name onto the boolean flag exposed on each node object.
    role_attr = {"etcd": "etcd",
                 "control": "controlPlane",
                 "worker": "worker"}.get(role)
    matched = []
    for node in client.list_node(clusterId=cluster.id).data:
        if role_attr and getattr(node, role_attr):
            matched.append(node)
    return matched
def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    """Hit the ingress on every schedulable Linux node and check that the
    responses come from the pods backing *workloads*."""
    # Give the ingress controller a moment to program the new rule.
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster, os_type="linux")
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = resolve_node_ip(node)
        url = "http://" + host_ip + path
        if not insecure_redirect:
            wait_until_ok(url, timeout=300, headers={
                "Host": host
            })
        cmd = curl_args + " " + url
        validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300,
                                    certcheck=False, is_insecure=False):
    """Wait for the ingress' public endpoint to appear, then validate that
    HTTP responses come from the pods backing *workloads*."""
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name) \
                        or certcheck:
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    # Let DNS / load-balancer propagation settle before issuing requests.
    time.sleep(10)
    validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
    """Return the names of all pods backing *workloads* (expected responders)."""
    target_name_list = []
    for workload in workloads:
        for pod in p_client.list_pod(workloadId=workload.id).data:
            target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    """Wait for a public endpoint on *workload* and return its http:// URL."""
    deadline = time.time() + timeout
    while True:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            endpoint = workload.publicEndpoints[0]
            return "http://" + endpoint["addresses"][0] + ":" + \
                   str(endpoint["port"])
def wait_until_lb_is_active(url, timeout=300):
    """Poll *url* until the load balancer starts accepting connections."""
    deadline = time.time() + timeout
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() > deadline:
            raise Exception('Timed out waiting for LB to become active')
def check_for_no_access(url, verify=False):
    """Return True if a GET to *url* raises a connection error."""
    try:
        requests.get(url, verify=verify)
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True
    return False
def wait_until_active(url, timeout=120):
    """Poll until *url* starts accepting connections."""
    deadline = time.time() + timeout
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() > deadline:
            raise Exception('Timed out waiting for url '
                            'to become active')
def wait_until_ok(url, timeout=120, headers=None):
    """Poll until a HEAD request to *url* returns HTTP 200.

    *headers* now defaults to None instead of a shared mutable dict
    (mutable-default-argument fix); passing {} or nothing behaves the same.
    """
    if headers is None:
        headers = {}
    deadline = time.time() + timeout
    while not check_if_ok(url, headers=headers):
        time.sleep(.5)
        if time.time() > deadline:
            raise Exception(
                'Timed out waiting for {0} to become ok'.format(url)
            )
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
    """Poll once a second until *url* answers with *expected_code*."""
    deadline = time.time() + timeout
    r = requests.get(url, verify=False)
    while r.status_code != expected_code:
        time.sleep(1)
        r = requests.get(url, verify=False)
        if time.time() > deadline:
            raise Exception(
                'Timed out waiting for status code {0}'
                ', actual code {1}'.format(
                    expected_code, r.status_code
                )
            )
def check_if_ok(url, verify=False, headers=None):
    """Return True when a HEAD request to *url* returns HTTP 200.

    *headers* now defaults to None instead of a shared mutable dict
    (mutable-default-argument fix); semantics are otherwise unchanged.
    """
    if headers is None:
        headers = {}
    try:
        res = requests.head(url, verify=verify, headers=headers)
        return res.status_code == 200
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return False
def validate_http_response(cmd, target_name_list, client_pod=None,
                           insecure=False):
    """Fetch *cmd* repeatedly until every backend in *target_name_list*
    has answered at least once.

    @param cmd: URL (curl-ed locally when client_pod is None) or the URL
        fetched via wget/powershell from inside *client_pod*
    @param target_name_list: pod names expected to serve the endpoint
    @param client_pod: optional pod to issue the request from
    @param insecure: pass --insecure to curl
    @raise AssertionError: a response was not an expected name, or some
        backend never answered within the attempt budget
    """
    if client_pod is None and cmd.startswith("http://"):
        wait_until_active(cmd, 60)
    target_hit_list = target_name_list[:]
    # budget: up to 5 attempts per expected backend
    count = 5 * len(target_name_list)
    # was range(1, count), which ran one attempt short of the budget
    for _ in range(count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            if insecure:
                curl_cmd += "\t--insecure"
            result = run_command(curl_cmd)
        else:
            if is_windows():
                wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
                           '"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
                           '{0}).Content }}"'.format(cmd)
            else:
                wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version="",
                     userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
    """End-to-end health check of a provisioned/updated cluster.

    Waits for the cluster and its nodes to become "active", optionally
    checks the Kubernetes version, verifies System-project workloads,
    then deploys a daemonSet workload (plus ingress when
    skipIngresscheck is False) to prove scheduling and networking work.

    @param client: management API client
    @param cluster: cluster object to validate
    @param intermediate_state: state expected during provisioning/update
    @param check_intermediate_state: assert the intermediate state is seen
    @param skipIngresscheck: when False, also create/validate an ingress
    @param nodes_not_in_active_state: hostnames excluded from the wait
        (NOTE(review): mutable default [] — never mutated here, but a
        None sentinel would be safer; confirm before changing)
    @param k8s_version: expected k8s version ("" = skip the check)
    @param userToken: token used for project-level clients
    @param timeout: max seconds for cluster state transitions
    @return: the refreshed cluster object
    """
    # Allow sometime for the "cluster_owner" CRTB to take effect
    time.sleep(5)
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state,
        timeout=timeout)
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    # component-status check only applies to RKE clusters (they expose etcd)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
    # check all workloads under the system project are active
    # wait for workloads to be active
    # time.sleep(DEFAULT_TIMEOUT)
    print("checking if workloads under the system project are active")
    sys_project = client.list_project(name='System',
                                      clusterId=cluster.id).data[0]
    sys_p_client = get_project_client_for_token(sys_project, userToken)
    for wl in sys_p_client.list_workload().data:
        """to help run KDM job faster (when there are many clusters),
        timeout=300 is set"""
        wait_for_wl_to_active(sys_p_client, wl, timeout=300)
    # Create Daemon set workload and have an Ingress with Workload
    # rule pointing to this daemonSet
    project, ns = create_project_and_ns(userToken, cluster)
    p_client = get_project_client_for_token(project, userToken)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    # a daemonSet should land one pod on every schedulable node
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster, client)))
    if not skipIngresscheck:
        pods = p_client.list_pod(workloadId=workload["id"]).data
        scale = len(pods)
        # test service discovery
        validate_service_discovery(workload, scale, p_client, ns, pods)
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths":
                    [{"workloadIds": [workload.id], "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster
def check_cluster_version(cluster, version):
    """Assert both the applied cluster config and the live kubectl server
    report the expected Kubernetes *version*."""
    applied = cluster.appliedSpec["rancherKubernetesEngineConfig"]
    cluster_k8s_version = applied["kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: {} Expected: {}".format(
            cluster_k8s_version, version)
    # strip the "-rancherX" suffix to get the upstream k8s version
    expected_k8s_version = version[:version.find("-rancher")]
    kubectl_k8s_version = \
        execute_kubectl_cmd("version")["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: {} Expected: {}".format(
            kubectl_k8s_version, expected_k8s_version)
def check_cluster_state(etcd_count):
    """Verify `kubectl get cs` shows a healthy scheduler,
    controller-manager and one etcd-N member per expected etcd node."""
    components = ["scheduler", "controller-manager"]
    components += ["etcd-" + str(i) for i in range(etcd_count)]
    print("components to check - " + str(components))
    for cs in execute_kubectl_cmd("get cs")["items"]:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"
        assert cs["conditions"][0]["type"] == "Healthy"
    assert len(components) == 0
def validate_dns_record(pod, record, expected):
    """Resolve the cluster-local service name built from *record* inside
    *pod* and check it maps to *expected* (pod needs `dig` - TEST_IMAGE)."""
    fqdn = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, fqdn, expected)
def validate_dns_entry(pod, host, expected):
    """Ping and dig *host* from inside *pod*: at least one expected value
    must show up in the ping output and all must appear in dig output."""
    if is_windows():
        validate_dns_entry_windows(pod, host, expected)
        return
    # requires pod with `dig` available - TEST_IMAGE
    ping_output = kubectl_pod_exec(pod, 'ping -c 1 -W 1 {0}'.format(host))
    assert any(value in str(ping_output) for value in expected)
    assert " 0% packet loss" in str(ping_output)
    dig_output = kubectl_pod_exec(pod, 'dig {0} +short'.format(host))
    for value in expected:
        assert value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
    """Windows flavour of validate_dns_entry: ping + Resolve-DnsName via
    powershell, each retried through wait_for until it succeeds."""
    def ping_check():
        # True when any expected value appears in the ping output AND
        # the ping reported no packet loss.
        ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
        ping_output = kubectl_pod_exec(pod, ping_cmd)
        ping_validation_pass = False
        for expected_value in expected:
            if expected_value in str(ping_output):
                ping_validation_pass = True
                break
        return ping_validation_pass and (" (0% loss)" in str(ping_output))
    wait_for(callback=ping_check,
             timeout_message="Failed to ping {0}".format(host))
    def dig_check():
        # True only when EVERY expected value resolves via Resolve-DnsName.
        dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
                  '"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
        dig_output = kubectl_pod_exec(pod, dig_cmd)
        dig_validation_pass = True
        for expected_value in expected:
            if expected_value not in str(dig_output):
                dig_validation_pass = False
                break
        return dig_validation_pass
    wait_for(callback=dig_check,
             timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
    """
    Checks whether dns_record got deleted successfully.
    Validates if dns_record is null in for current object client.
    @param client: Object client use to create dns_record
    @param dns_record: record object subjected to be deleted
    @param timeout: Max time to keep checking whether record is deleted or not
    """
    time.sleep(2)
    deadline = time.time() + timeout

    def fetch():
        return client.list_dns_record(name=dns_record.name, ).data

    while len(fetch()) != 0:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for record {} to be deleted"
                "".format(dns_record.name))
        time.sleep(.5)
def wait_for_nodes_to_become_active(client, cluster, exception_list=None,
                                    retry_count=0):
    """Wait until every node of *cluster* (minus *exception_list*
    hostnames) is "active", re-listing up to 5 times when nodes get
    auto-deleted out from under us (node pools).

    @param client: management API client
    @param cluster: cluster whose nodes to watch
    @param exception_list: requestedHostname values to skip (default none)
    @param retry_count: internal recursion depth counter
    """
    # mutable-default fix: [] used to be shared across calls
    if exception_list is None:
        exception_list = []
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evalauate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)
def wait_for_node_status(client, node, state):
    """Wait (up to MACHINE_TIMEOUT) for *node* to reach *state*.

    Returns the node, or None if the node disappeared - nodes that belong
    to node pools can be auto-deleted while we wait.

    @raise AssertionError: on timeout
    """
    uuid = node.uuid
    start = time.time()

    def refresh():
        # Handle the case of nodes getting auto deleted when they are part
        # of nodepools: a missing node is reported as None.
        nodes = client.list_node(uuid=uuid).data
        if len(nodes) == 1:
            return nodes[0]
        print("Node does not exist anymore -" + uuid)
        return None

    current = refresh()
    if current is None:
        return None
    while current.state != state:
        if time.time() - start > MACHINE_TIMEOUT:
            # message used to hardcode "active" even for other states
            raise AssertionError(
                "Timed out waiting for state to get to " + state)
        time.sleep(5)
        current = refresh()
        if current is None:
            return None
    return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
    """Poll until no node with this uuid remains, or raise after *timeout*.

    @param client: management API client
    @param node: node object being deleted (matched by uuid)
    @param timeout: max seconds to wait
    """
    uuid = node.uuid
    start = time.time()
    node_count = len(client.list_node(uuid=uuid).data)
    while node_count != 0:
        if time.time() - start > timeout:
            # message used to wrongly say "state to get to active"
            raise AssertionError(
                "Timed out waiting for node to be deleted")
        time.sleep(.5)
        node_count = len(client.list_node(uuid=uuid).data)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    """Wait until the cluster reports exactly *expected_node_count* nodes."""
    deadline = time.time() + timeout

    def count_nodes():
        return len(client.list_node(clusterId=cluster.id).data)

    while count_nodes() != expected_node_count:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
def get_custom_host_registration_cmd(client, cluster, roles, node):
    """Build the agent registration command for a custom node.

    Linux nodes get one "--<role>" flag per requested role; Windows nodes
    (ssh user 'Administrator') are always registered as workers.
    """
    allowed_roles = ["etcd", "worker", "controlplane"]
    tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    cluster_token = tokens[0] if len(tokens) > 0 else \
        create_custom_host_registration_token(client, cluster)
    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    if 'Administrator' == node.ssh_user:
        cmd = cluster_token.windowsNodeCommand.replace(
            '| iex', '--worker' + additional_options + ' | iex ')
    else:
        cmd = cluster_token.nodeCommand
        for role in roles:
            assert role in allowed_roles
            cmd += " --" + role
        cmd += additional_options
    return cmd
def create_custom_host_registration_token(client, cluster):
    """Create and return an 'active' cluster registration token."""
    # Allow sometime for the "cluster_owner" CRTB to take effect
    time.sleep(5)
    token = client.wait_success(
        client.create_cluster_registration_token(clusterId=cluster.id))
    assert token.state == 'active'
    return token
def get_cluster_by_name(client, name):
    """Return the single cluster named *name*; fail if it does not exist."""
    matches = client.list_cluster(name=name).data
    assert len(matches) == 1, "Cluster " + name + " does not exist"
    return matches[0]
def get_cluster_type(client, cluster):
    """Classify a cluster as "Custom", "Imported", or one of the
    hosted/RKE provider config types."""
    cluster_configs = [
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig"
    ]
    # RKE clusters without a node template were registered node-by-node
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0 and nodes[0].nodeTemplateId is None:
            return "Custom"
    for cluster_config in cluster_configs:
        if cluster_config in cluster:
            return cluster_config
    return "Imported"
def delete_cluster(client, cluster):
    """Delete *cluster* via the API; for Imported/Custom clusters also
    terminate the backing AWS instances (matched by name tag, falling
    back to public-IP lookup)."""
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststress*', 'testsa*']}]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                host_ip = resolve_node_ip(node)
                ip_list.append(host_ip)
            # NOTE(review): ip_filter always has 2 keys, so this assert can
            # never fire; it probably meant len(ip_list) > 0 - confirm.
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            if aws_nodes is None:
                # search instances by IPs in case names do not follow patterns
                aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
            if aws_nodes is None:
                print("no instance is found in AWS")
            else:
                for node in aws_nodes:
                    print(node.public_ip_address)
                AmazonWebServices().delete_nodes(aws_nodes)
    # Delete Cluster
    client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    """Ping every pod of *workload2* from every pod of *workload1*."""
    source_pods = p_client1.list_pod(workloadId=workload1.id).data
    dest_pods = p_client2.list_pod(workloadId=workload2.id).data
    for src in source_pods:
        for dst in dest_pods:
            check_connectivity_between_pods(src, dst, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
    """Ping every pod of *workload* from every pod (including itself)."""
    pods = p_client.list_pod(workloadId=workload.id).data
    for src in pods:
        for dst in pods:
            check_connectivity_between_pods(src, dst)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    """Ping pod2's IP from inside pod1 and assert the packet-loss marker
    matching *allow_connectivity*."""
    pod_ip = pod2.status.podIp
    if is_windows():
        cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
        ok_marker, blocked_marker = " (0% loss)", " (100% loss)"
    else:
        cmd = "ping -c 1 -W 1 " + pod_ip
        ok_marker, blocked_marker = " 0% packet loss", " 100% packet loss"
    response = str(kubectl_pod_exec(pod1, cmd))
    assert pod_ip in response
    expected_marker = ok_marker if allow_connectivity else blocked_marker
    assert expected_marker in response
def kubectl_pod_exec(pod, cmd):
    """Run *cmd* inside *pod* via `kubectl exec`; return raw output."""
    full_cmd = "exec {0} -n {1} -- {2}".format(pod.name, pod.namespaceId, cmd)
    return execute_kubectl_cmd(full_cmd, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
    """Run *cmd* on ip:port over SSH and return stdout lines.

    Auth uses *sshKey* when given, otherwise *password*.
    The connection is now closed after use (it used to leak).
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        if sshKey:
            ssh.connect(ip, username=user, key_filename=sshKey, port=port)
        else:
            ssh.connect(ip, username=user, password=password, port=port)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
    finally:
        ssh.close()
    return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    """Wait for namespace *ns* to reach "active"; return the refreshed
    namespace object."""
    start = time.time()
    time.sleep(10)

    def refresh():
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        return nss[0]

    current = refresh()
    while current.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        current = refresh()
    return current
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    """Wait until each of the workload's *numofpods* pods runs
    *expectedimage*.

    @param p_client: project client (unused, kept for call compatibility)
    @param workload: workload whose labels select the pods
    @param ns_name: namespace to query
    @param expectedimage: image every pod's first container must run
    @param numofpods: number of pods to check
    @raise AssertionError: on timeout
    """
    start = time.time()
    # NOTE(review): only the LAST workload label survives this loop; if a
    # workload carries several labels the selector may be wrong - confirm.
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    # was range(0, numofpods - 1), which never checked the last pod
    for x in range(numofpods):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    """Wait until *workload* has exactly *pod_count* pods; return them."""
    deadline = time.time() + timeout
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for pods in workload {}. Expected {}. "
                "Got {}".format(workload.name, pod_count, len(pods)))
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods
def get_user_client_and_cluster(client=None):
    """Return (user client, cluster): the CLUSTER_NAME cluster when set,
    otherwise the first cluster the client can see."""
    if not client:
        client = get_user_client()
    name_filter = {} if CLUSTER_NAME == "" else {"name": CLUSTER_NAME}
    clusters = client.list_cluster(**name_filter).data
    assert len(clusters) > 0
    return client, clusters[0]
def get_global_admin_client_and_cluster():
    """Return (admin client, cluster): the CLUSTER_NAME cluster when set,
    otherwise the first cluster visible to the admin."""
    client = get_admin_client()
    name_filter = {} if CLUSTER_NAME == "" else {"name": CLUSTER_NAME}
    clusters = client.list_cluster(**name_filter).data
    assert len(clusters) > 0
    return client, clusters[0]
def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[],
                           timeout=MACHINE_TIMEOUT):
    """Drive the cluster through its state machine: optionally assert the
    intermediate state, wait for "active", wait for the nodes, then wait
    (up to 60s) for the k8s version field to be synced.

    @param client: management API client
    @param cluster: cluster object to watch
    @param check_intermediate_state: assert *intermediate_state* is seen
    @param intermediate_state: state expected during provisioning/update
    @param nodes_not_in_active_state: hostnames excluded from the node wait
        (NOTE(review): mutable default [] - never mutated here)
    @param timeout: max seconds for each state transition
    @return: the refreshed cluster object
    """
    start_time = time.time()
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=timeout)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=timeout)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    # the "version" key appears only once the K8s version has been synced;
    # give it a fixed 60s grace period
    timeout = 60
    start = time.time()
    while "version" not in cluster.keys():
        time.sleep(1)
        cluster = client.reload(cluster)
        delta = time.time() - start
        if delta > timeout:
            msg = "Timeout waiting for K8s version to be synced"
            raise Exception(msg)
    end_time = time.time()
    diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
    print("The total time for provisioning/updating the cluster {} : {}".
          format(cluster.name, diff))
    return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    """Reload *obj* with exponential backoff (capped at 2s) until the API
    stops answering 403; return the refreshed object."""
    start = time.time()
    sleep = 0.01
    while True:
        time.sleep(sleep)
        sleep = min(sleep * 2, 2)
        try:
            obj = client.reload(obj)
        except ApiError as e:
            # 403 means "not visible yet" - keep waiting; anything else
            # is a real failure
            if e.error.status != 403:
                raise e
        else:
            return obj
        delta = time.time() - start
        if delta > timeout:
            msg = 'Timeout waiting for [{}:{}] for condition after {}' \
                  ' seconds'.format(obj.type, obj.id, delta)
            raise Exception(msg)
def delete_node(aws_nodes):
    """Terminate every AWS node in *aws_nodes*."""
    for aws_node in aws_nodes:
        AmazonWebServices().delete_node(aws_node)
def cluster_cleanup(client, cluster, aws_nodes=None):
    """Tear down *cluster* (and its AWS nodes) when RANCHER_CLEANUP_CLUSTER
    is set; otherwise persist connection details to env_file so later
    jobs can reuse the cluster."""
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)
def create_config_file(env_details):
    """Overwrite the env_file used by downstream jobs with *env_details*.

    @param env_details: text written verbatim to the file
    """
    # context manager ensures the handle is closed even on write errors
    with open(env_file, "w") as config_file:
        config_file.write(env_details)
def validate_hostPort(p_client, workload, source_port, cluster):
    """Verify a hostPort workload: the published port matches
    *source_port* and each node hosting a pod answers on that port."""
    get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            # hostPort: only the pod on THIS node answers; the break keeps
            # at most one pod name per node
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        if len(target_name_list) > 0:
            host_ip = resolve_node_ip(node)
            curl_cmd = " http://" + host_ip + ":" + \
                       str(source_port) + "/name.html"
            validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
    """Check the LB endpoint: published port matches *source_port* and
    every backend pod answers on /name.html."""
    url = get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    assert source_port == wl.publicEndpoints[0]["port"], \
        "Source ports do not match"
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
    """Check the NodePort service: published port matches *source_port*
    and every schedulable node serves every backend pod on /name.html."""
    get_endpoint_url_for_workload(p_client, workload, 600)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    target_name_list = [pod.name
                        for pod in p_client.list_pod(workloadId=wl.id).data]
    print("target name list:" + str(target_name_list))
    for node in get_schedulable_nodes(cluster):
        host_ip = resolve_node_ip(node)
        curl_cmd = " http://{0}:{1}/name.html".format(host_ip,
                                                      source_port_wk)
        validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
    """From each pod in *test_pods*, curl the cluster IP and verify all
    of the workload's pods respond."""
    backend_pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = [pod["name"] for pod in backend_pods]
    curl_cmd = "http://{0}:{1}/name.html".format(cluster_ip, source_port)
    for test_pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, test_pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    """Wait for the persistent volume to reach the "available" state.

    @param c_client: cluster client
    @param pv_object: PV to watch (matched by uuid)
    @param timeout: max seconds to wait
    @return: the refreshed PV object
    @raise AssertionError: on timeout
    """
    start = time.time()
    time.sleep(2)

    def refresh():
        # local was named 'list', shadowing the builtin - renamed
        pvs = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(pvs) == 1
        return pvs[0]

    pv = refresh()
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        pv = refresh()
    return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    """Wait for the persistent volume claim to reach the "bound" state.

    @param p_client: project client
    @param pvc_object: PVC to watch (matched by uuid)
    @param timeout: max seconds to wait
    @return: the refreshed PVC object
    @raise AssertionError: on timeout
    """
    start = time.time()
    time.sleep(2)

    def refresh():
        # local was named 'list', shadowing the builtin - renamed
        pvcs = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(pvcs) == 1
        return pvcs[0]

    pvc = refresh()
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        pvc = refresh()
    return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    """Create a workload (deployment or daemonSet) that mounts the given
    PVC at *mount_path*/*sub_path*."""
    volumes = [{
        "type": "volume",
        "name": "vol1",
        "persistentVolumeClaim": {
            "readOnly": "false",
            "type": "persistentVolumeClaimVolumeSource",
            "persistentVolumeClaimId": pvc_name,
        },
    }]
    containers = [{
        "name": "test1",
        "image": TEST_IMAGE,
        "volumeMounts": [{
            "readOnly": "False",
            "type": "volumeMount",
            "mountPath": mount_path,
            "subPath": sub_path,
            "name": "vol1",
        }],
    }]
    kwargs = dict(name=wl_name, containers=containers,
                  namespaceId=ns_id, volumes=volumes)
    if is_daemonSet:
        kwargs["daemonSetConfig"] = {}
    return p_client.create_workload(**kwargs)
def write_content_to_file(pod, content, filename):
    """Write *content* to *filename* inside *pod*; assert silent success.

    @param pod: target pod (command run via kubectl exec)
    @param content: text to write
    @param filename: path inside the pod
    """
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    if is_windows():
        # Literal braces of the powershell script block must be doubled,
        # otherwise str.format raises on the unescaped '{ '.
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
    """Assert *filename* inside *pod* contains exactly *content*.

    @param pod: target pod (command run via kubectl exec)
    @param content: expected file content (after strip)
    @param filename: path inside the pod
    """
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    if is_windows():
        # Literal braces of the powershell script block must be doubled,
        # otherwise str.format raises on the unescaped '{ '.
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
                             timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Wait for a multi-cluster app to settle in the "active" state."""
    time.sleep(5)
    # When the app is deployed it goes into Active state for a short
    # period of time and then into installing/deploying.

    def fetch():
        return client.list_multiClusterApp(
            uuid=multiClusterApp.uuid, name=multiClusterApp.name).data

    mcapps = fetch()
    start = time.time()
    assert len(mcapps) == 1, "Cannot find multi cluster app"
    mapp = mcapps[0]
    while mapp.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        multiclusterapps = fetch()
        assert len(multiclusterapps) == 1
        mapp = multiclusterapps[0]
    return mapp
def wait_for_app_to_active(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """
    First wait for app to come in deployment state, then wait for it get
    in active state. This is to avoid wrongly conclude that app is active
    as app goes to state installing > active > deploying > active
    @param client: Project client
    @param app_id: App id of deployed app.
    @param timeout: Max time allowed to wait for app to become active.
    @return: app object
    """
    start = time.time()
    app_data = client.list_app(id=app_id).data
    # phase 1: the app must become listable (grace period: timeout/10)
    while len(app_data) == 0:
        if time.time() - start > timeout / 10:
            raise AssertionError(
                "Timed out waiting for listing the app from API")
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
    application = app_data[0]
    # phase 2: wait (best effort, up to timeout/3) for the "deploying"
    # state so the early transient "active" is not mistaken for done
    while application.state != "deploying":
        if time.time() - start > timeout / 3:
            break
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
        application = app_data[0]
    # phase 3: wait for the final "active" state
    while application.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        app = client.list_app(id=app_id).data
        assert len(app) >= 1
        application = app[0]
    return application
def wait_for_app_to_remove(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Wait until the app disappears or leaves the active/removing
    states; only timeout/10 seconds are allowed."""
    deadline = time.time() + timeout / 10
    while True:
        app_data = client.list_app(id=app_id).data
        if len(app_data) == 0:
            return
        application = app_data[0]
        if application.state not in ("removing", "active"):
            return
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for app to not be installed")
        time.sleep(.2)
def validate_response_app_endpoint(p_client, appId,
                                   timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Poll the ingress public endpoints of the app's namespace until one
    answers HTTP 200, failing the test on timeout or connection error."""
    ingress_list = p_client.list_ingress(namespaceId=appId).data
    assert len(ingress_list) == 1
    ingress = ingress_list[0]
    if hasattr(ingress, 'publicEndpoints'):
        for public_endpoint in ingress.publicEndpoints:
            url = \
                public_endpoint["protocol"].lower() + "://" + \
                public_endpoint["hostname"]
            print(url)
            start = time.time()
            try:
                while True:
                    r = requests.head(url)
                    print(r.status_code)
                    if r.status_code == 200:
                        return
                    if time.time() - start > timeout:
                        raise AssertionError(
                            "Timed out waiting response to be 200.")
                    time.sleep(.5)
            # NOTE(review): a single ConnectionError aborts immediately
            # instead of retrying until the timeout - confirm intended.
            except requests.ConnectionError:
                print("failed to connect")
                assert False, "failed to connect to the app"
def resolve_node_ip(node):
    """Prefer the node's external IP; fall back to its internal address."""
    if hasattr(node, 'externalIpAddress'):
        return node.externalIpAddress
    return node.ipAddress
def provision_nfs_server():
    """Create an AWS node and run the NFS setup script on it.

    @return: the provisioned node object
    """
    node = AmazonWebServices().create_node(random_test_name("nfs-server"))
    node.wait_for_ssh_ready()
    c_path = os.getcwd()
    cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
    # close the script file promptly instead of leaking the handle
    with open(cmd_path, 'r') as script:
        command = script.read()
    node.execute_command(command)
    return node
def get_defaut_question_answers(client, externalId):
    """Build a default answer set for every question (and shown
    sub-question) of the catalog template identified by *externalId*.

    (Name spelling kept as-is: it is the public API of this helper.)

    @param client: API client used to look up the template version
    @param externalId: catalog external id of the template revision
    @return: dict mapping question variable -> chosen answer
    """
    def get_answer(quest):
        # Use the declared default when present; otherwise synthesize a
        # value for required questions (enum/password/string only).
        if "default" in quest.keys():
            answer = quest["default"]
        else:
            answer = ""
        # If required and no default value is available, set fake value
        # only for type string . For other types error out
        if "required" in quest.keys():
            if quest["required"]:
                if quest["type"] == "enum" and "options" in quest.keys():
                    answer = quest["options"][0]
                elif quest["type"] == "password":
                    answer = "R@ncher135"
                elif quest["type"] == "string":
                    answer = "fake"
                else:
                    assert False, \
                        "Cannot set default for types {}" \
                        "".format(quest["type"])
        return answer
    def check_if_question_needed(questions_and_answers, ques):
        # Evaluate the question's "showIf" expression: all "&&"-joined
        # var=value conditions must match already-collected answers.
        add_question = False
        match_string = ques["showIf"]
        match_q_as = match_string.split("&&")
        for q_a in match_q_as:
            items = q_a.split("=")
            if len(items) == 1:
                items.append("")
            if items[0] in questions_and_answers.keys():
                if questions_and_answers[items[0]] == items[1]:
                    add_question = True
                else:
                    add_question = False
                    break
        return add_question
    questions_and_answers = {}
    print("external id = {}".format(externalId))
    template_revs = client.list_template_version(externalId=externalId).data
    assert len(template_revs) == 1
    template_rev = template_revs[0]
    questions = template_rev.questions
    for ques in questions:
        add_question = True
        if "showIf" in ques.keys():
            add_question = \
                check_if_question_needed(questions_and_answers, ques)
        if add_question:
            question = ques["variable"]
            answer = get_answer(ques)
            questions_and_answers[question] = get_answer(ques)
            # sub-questions are only answered when the parent's answer
            # matches "showSubquestionIf"
            if "showSubquestionIf" in ques.keys():
                if ques["showSubquestionIf"] == answer:
                    sub_questions = ques["subquestions"]
                    for sub_question in sub_questions:
                        question = sub_question["variable"]
                        questions_and_answers[question] = \
                            get_answer(sub_question)
    print("questions_and_answers = {}".format(questions_and_answers))
    return questions_and_answers
def validate_app_deletion(client, app_id,
                          timeout=DEFAULT_APP_DELETION_TIMEOUT):
    """Wait until the app leaves the "removing" state or disappears."""
    deadline = time.time() + timeout
    while True:
        app_data = client.list_app(id=app_id).data
        if len(app_data) == 0 or app_data[0].state != "removing":
            return
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for app to delete")
        time.sleep(.5)
def validate_catalog_app(proj_client, app, external_id, answer=None):
    """
    This method validates all the workloads deployed are in active state,
    have correct version and validates the answers.
    @param proj_client: Project client object of a existing project.
    @param app: Deployed app object.
    @param external_id: URl of app API.
    @param answer: answer, app seek while deploying, body of the post call.
    @return: Deployed app object.
    """
    if answer is None:
        answers = get_defaut_question_answers(get_user_client(), external_id)
    else:
        answers = answer
    # validate app is active
    app = wait_for_app_to_active(proj_client, app.id)
    assert app.externalId == external_id, \
        "the version of the app is not correct"
    # check if associated workloads are active
    ns = app.targetNamespace
    # the external id encodes catalog/template/version as &-separated
    # key=value pairs; the last two are the template name and version
    parameters = external_id.split('&')
    assert len(parameters) > 1, \
        "Incorrect list of parameters from catalog external ID"
    chart_prefix = parameters[len(parameters) - 2].split("=")[1]
    chart_suffix = parameters[len(parameters) - 1].split("=")[1]
    chart = chart_prefix + "-" + chart_suffix
    # NOTE(review): app_name duplicates chart_prefix (same expression)
    app_name = parameters[len(parameters) - 2].split("=")[1]
    workloads = proj_client.list_workload(namespaceId=ns).data
    # For longhorn app, only active state of workloads is verified as longhorn
    # workloads do not have the field workloadLabels
    # For all other apps active state of workloads & chart version are verified
    if "longhorn" in app.externalId:
        print("validating the Longhorn app, it may take longer than others")
        for wl in workloads:
            wait_for_wl_to_active(proj_client, wl)
    else:
        for wl in workloads:
            print("Workload {} , state - {}".format(wl.id, wl.state))
            assert wl.state == "active"
            chart_deployed = get_chart_info(wl.workloadLabels)
            print("Chart detail of app - {}".format(chart_deployed))
            # '-' check is to make sure chart has both app name and version.
            if app_name in chart_deployed and '-' in chart_deployed:
                assert chart_deployed == chart, "the chart version is wrong"
    # Validate_app_answers
    assert len(answers.items() - app["answers"].items()) == 0, \
        "Answers are not same as the original catalog answers"
    return app
def get_chart_info(workloadlabels):
    """
    This method finds either 'chart' tag or
    'helm.sh/chart' tag from workload API
    @param workloadlabels: workloadslabel object
    @return: chart value of workload e.g. 'app_name-version'
    """
    label_keys = workloadlabels.keys()
    if "chart" in label_keys:
        return workloadlabels.chart
    if "helm.sh/chart" in label_keys:
        return workloadlabels["helm.sh/chart"]
    return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
    """Create a standard user with the global "user" role and log in.

    @return: (user object, bearer token)
    """
    user = client.create_user(username=random_name(),
                              password=USER_PASSWORD)
    client.create_global_role_binding(globalRoleId="user",
                                      subjectKind="User",
                                      userId=user.id)
    token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
    return user, token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
    """POST the login action and return the bearer token."""
    payload = {
        'username': username,
        'password': password,
        'responseType': 'json',
    }
    r = requests.post(cattle_auth_url, json=payload, verify=False)
    print(r.json())
    return r.json()["token"]
def rbac_get_user_by_role(role):
    """Return the pre-created user for *role*, or None if unknown."""
    if role in rbac_data["users"].keys():
        return rbac_data["users"][role]["user"]
    return None
def rbac_get_user_token_by_role(role):
    """Return the bearer token for *role*'s user, or None if unknown."""
    if role in rbac_data["users"].keys():
        return rbac_data["users"][role]["token"]
    return None
def rbac_get_kubeconfig_by_role(role):
    """Return the kubeconfig path for *role*'s user, or None if unknown."""
    if role in rbac_data["users"].keys():
        return rbac_data["users"][role]["kubeconfig"]
    return None
def rbac_get_project():
    """Return the shared project created by rbac_prepare()."""
    return rbac_data["project"]
def rbac_get_namespace():
    """Return the shared namespace created by rbac_prepare()."""
    return rbac_data["namespace"]
def rbac_get_workload():
    """Return the shared workload created by rbac_prepare()."""
    return rbac_data["workload"]
def rbac_get_unshared_project():
    """Return the project no rbac test user is assigned to."""
    return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
    """Return the namespace in the unshared project."""
    return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
    """Return the workload in the unshared project."""
    return rbac_data["wl_unshared"]
def rbac_prepare():
    """Provision the shared RBAC fixtures.
    Creates one project with a namespace and a workload, one user per role
    key in rbac_data["users"] (each then assigned a different cluster or
    project role), and a second "unshared" project/namespace/workload that
    none of those users are given access to.  Everything created is stored
    in the module-level rbac_data dict for the RBAC tests to look up.
    """
    admin_client, cluster = get_global_admin_client_and_cluster()
    create_kubeconfig(cluster)
    # create a new project in the cluster
    project, ns = create_project_and_ns(ADMIN_TOKEN,
                                        cluster,
                                        random_test_name("p-test-rbac"))
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id)
    validate_workload(p_client, workload, "deployment", ns.name)
    rbac_data["workload"] = workload
    rbac_data["project"] = project
    rbac_data["namespace"] = ns
    # create new users
    for key in rbac_data["users"]:
        user1, token1 = create_user(admin_client)
        rbac_data["users"][key]["user"] = user1
        rbac_data["users"][key]["token"] = token1
    # assign different role to each user
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_OWNER]["user"],
                              cluster,
                              CLUSTER_OWNER)
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_MEMBER]["user"],
                              cluster,
                              CLUSTER_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_MEMBER]["user"],
                              project,
                              PROJECT_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_OWNER]["user"],
                              project,
                              PROJECT_OWNER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_READ_ONLY]["user"],
                              project,
                              PROJECT_READ_ONLY)
    # create kubeconfig files for each user
    for key in rbac_data["users"]:
        user_client = get_client_for_token(rbac_data["users"][key]["token"])
        _, user_cluster = get_user_client_and_cluster(user_client)
        rbac_data["users"][key]["kubeconfig"] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            key + "_kubeconfig")
        create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
    # create another project that none of the above users are assigned to
    p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
                                    cluster,
                                    random_test_name("p-unshared"))
    name = random_test_name("default")
    p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns2.id)
    validate_workload(p_client, workload, "deployment", ns2.name)
    rbac_data["p_unshared"] = p2
    rbac_data["ns_unshared"] = ns2
    rbac_data["wl_unshared"] = workload
def rbac_cleanup():
    """ remove the project, namespace and users created for the RBAC tests"""
    try:
        client = get_admin_client()
    except Exception:
        print("Not able to get admin client. Not performing RBAC cleanup")
        return
    # best-effort: a user may already be gone; keep cleaning up the rest
    for _, value in rbac_data["users"].items():
        try:
            client.delete(value["user"])
        except Exception:
            pass
    client.delete(rbac_data["project"])
    client.delete(rbac_data["wl_unshared"])
    client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
    """Build a predicate reporting whether a resource has a condition of the
    given type with the given status.
    :param condition_type: condition type to match (e.g. "Ready")
    :param status: expected status value of that condition
    :return: callable taking a resource and returning True/False
    """
    def _find_condition(resource):
        conditions = getattr(resource, "conditions", None)
        if not conditions:
            return False
        return any(cond.type == condition_type and cond.status == status
                   for cond in conditions)
    return _find_condition
def create_catalog_external_id(catalog_name, template, version,
                               project_cluster_id=None, catalog_type=None):
    """Build the externalId URL used when launching a catalog app.
    A global catalog (catalog_type None) references the catalog directly;
    "project"/"cluster" scoped catalogs are prefixed with the owning
    project/cluster id and tagged with the catalog type.
    """
    suffix = "&template=" + template + "&version=" + version
    if catalog_type is None:
        return "catalog://?catalog=" + catalog_name + suffix
    if catalog_type in ("project", "cluster"):
        return ("catalog://?catalog=" + project_cluster_id + "/"
                + catalog_name + "&type=" + catalog_type + "Catalog" + suffix)
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
    """Poll the catalog (looked up again by name) until its state is
    "active"; raise AssertionError on timeout.
    :param client: rancher API client
    :param catalog: catalog object to wait on
    :param timeout: seconds to wait before giving up
    :return: the refreshed catalog object in "active" state
    """
    time.sleep(2)  # give the new catalog a moment to show up in list_catalog
    catalog_data = client.list_catalog(name=catalog.name)
    print(catalog_data)
    start = time.time()
    assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
    catalog = catalog_data["data"][0]
    while catalog.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        catalog_data = client.list_catalog(name=catalog.name)
        assert len(catalog_data["data"]) >= 1
        catalog = catalog_data["data"][0]
    return catalog
def readDataFile(data_dir, name):
    """Read and return the contents of a data file; fail if it is missing."""
    file_path = os.path.join(data_dir, name)
    print("File: " + file_path)
    assert os.path.isfile(file_path)
    with open(file_path) as data_file:
        return data_file.read()
def set_url_password_token(rancher_url, server_url=None):
    """Log in as the default admin user (admin/admin), change the admin
    password to ADMIN_PASSWORD, set the server-url setting, and return the
    login token.
    :param rancher_url: base URL of the Rancher server
    :param server_url: value for the server-url setting; defaults to
        rancher_url when not given
    :return: the admin API token string
    """
    auth_url = \
        rancher_url + "/v3-public/localproviders/local?action=login"
    r = requests.post(auth_url, json={
        'username': 'admin',
        'password': 'admin',
        'responseType': 'json',
    }, verify=False)
    print(r.json())
    token = r.json()['token']
    print(token)
    # Change admin password
    client = rancher.Client(url=rancher_url + "/v3",
                            token=token, verify=False)
    admin_user = client.list_user(username="admin").data
    admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
    # Set server-url settings
    serverurl = client.list_setting(name="server-url").data
    if server_url:
        client.update(serverurl[0], value=server_url)
    else:
        client.update(serverurl[0], value=rancher_url)
    return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
    """
    This function validates if the user has the permission to create a
    global catalog.
    :param token: user's token
    :param catalog_name: the name of the catalog
    :param branch: the branch of the git repo
    :param url: the url of the git repo
    :param permission: boolean value, True if the user can create catalog
    :return: the catalog object or None
    """
    client = get_client_for_token(token)
    if not permission:
        # the API must reject the request with 403 Forbidden
        with pytest.raises(ApiError) as e:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        error_msg = "user with no permission should receive 403: Forbidden"
        error_code = e.value.error.code
        error_status = e.value.error.status
        assert error_status == 403 and error_code == 'Forbidden', error_msg
        return None
    else:
        try:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        except ApiError as e:
            assert False, "user with permission should receive no exception:" \
                          + str(e.error.status) + " " + e.error.code
        catalog_list = client.list_catalog(name=catalog_name).data
        assert len(catalog_list) == 1
        return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
    """Return a deep-copied global-role template with its name and
    newUserDefault flag filled in.
    :param name: role name; a random one is generated when None
    :param new_user_default: whether new users get this role by default
    :param template: base template dict; defaults to TEMPLATE_MANAGE_CATALOG
    """
    base = TEMPLATE_MANAGE_CATALOG if template is None else template
    result = deepcopy(base)
    # the API expects string booleans here
    result["newUserDefault"] = "true" if new_user_default else "false"
    result["name"] = name if name is not None else random_name()
    return result
def wait_for_backup_to_active(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll the named etcd backup until its state is "active"; raise
    AssertionError on timeout.
    :return: the final backup state string ("active")
    """
    start = time.time()
    etcdbackups = cluster.etcdBackups(name=backupname)
    assert len(etcdbackups) == 1
    etcdbackupdata = etcdbackups['data']
    etcdbackupstate = etcdbackupdata[0]['state']
    while etcdbackupstate != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        etcdbackups = cluster.etcdBackups(name=backupname)
        assert len(etcdbackups) == 1
        etcdbackupdata = etcdbackups['data']
        etcdbackupstate = etcdbackupdata[0]['state']
    print("BACKUP STATE")
    print(etcdbackupstate)
    return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until the named etcd backup is no longer listed; raise
    AssertionError on timeout."""
    start = time.time()
    etcdbackups = cluster.etcdBackups(name=backupname)
    while len(etcdbackups) == 1:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for backup to be deleted")
        time.sleep(.5)
        etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
    """Create a daemonSet workload plus an ingress, take an etcd backup and
    verify the snapshot exists in the configured store.
    :param namespace: dict with p_client, ns, cluster (and nodes when
        backup_mode is 'filesystem')
    :param backup_info: dict the backup name/id/filename are recorded into
    :param backup_mode: None, "s3" or "filesystem" -- where the snapshot's
        existence is verified
    :return: (namespace, backup_info) with backup details filled in
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    cluster = namespace["cluster"]
    name = random_test_name("default")
    if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
        assert False, "Cluster is not of type RKE"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    backup_info["workload"] = p_client.create_workload(name=name,
                                                       containers=con,
                                                       namespaceId=ns.id,
                                                       daemonSetConfig={})
    validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    host = "test" + str(random_int(10000, 99999)) + ".com"
    namespace["host"] = host
    path = "/name.html"
    rule = {"host": host,
            "paths": [{"workloadIds": [backup_info["workload"].id],
                       "targetPort": "80"}]}
    p_client.create_ingress(name=name,
                            namespaceId=ns.id,
                            rules=[rule])
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Perform Backup
    backup = cluster.backupEtcd()
    backup_info["backupname"] = backup['metadata']['name']
    wait_for_backup_to_active(cluster, backup_info["backupname"])
    # Get all the backup info
    etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
    backup_info["etcdbackupdata"] = etcdbackups['data']
    backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
    if backup_mode == "s3":
        backupfileurl = backup_info["etcdbackupdata"][0]['filename']
        # Check the backup filename exists in S3
        parseurl = urlparse(backupfileurl)
        backup_info["backupfilename"] = os.path.basename(parseurl.path)
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert backup_found, "the backup was not found in the S3 bucket"
    elif backup_mode == 'filesystem':
        # the snapshot must exist on every etcd node's local disk
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            assert backup_info["etcdbackupdata"][0]['filename'] in response, \
                "The filename doesn't match any of the files locally"
    return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
    """Restore the cluster from the recorded etcd backup and verify that
    state created before the snapshot survives while a workload created
    after it is gone.
    :param namespace: dict with p_client, ns, cluster and host
    :param backup_info: dict produced by validate_backup_create
    :return: (namespace, backup_info)
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    client = get_user_client()
    cluster = namespace["cluster"]
    name = random_test_name("default")
    host = namespace["host"]
    path = "/name.html"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    # Create workload after backup
    testworkload = p_client.create_workload(name=name,
                                            containers=con,
                                            namespaceId=ns.id)
    validate_workload(p_client, testworkload, "deployment", ns.name)
    # Perform Restore
    cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
    # After restore, validate cluster
    validate_cluster(client, cluster, intermediate_state="updating",
                     check_intermediate_state=True,
                     skipIngresscheck=False)
    # Verify the ingress created before taking the snapshot
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Verify the workload created after getting a snapshot does not exist
    # after restore
    workload_list = p_client.list_workload(uuid=testworkload.uuid).data
    print(len(workload_list))
    assert len(workload_list) == 0, "workload shouldn't exist after restore"
    return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
    """Delete the recorded etcd backup and verify it is gone from the
    cluster and from the backing store (S3 or the etcd nodes' filesystem).
    :param namespace: dict with cluster (and nodes for filesystem mode)
    :param backup_info: dict produced by validate_backup_create
    :param backup_mode: None, "s3" or "filesystem"
    """
    client = get_user_client()
    cluster = namespace["cluster"]
    client.delete(
        cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
    )
    wait_for_backup_to_delete(cluster, backup_info["backupname"])
    assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
        "backup shouldn't be listed in the Cluster backups"
    if backup_mode == "s3":
        # Check the backup reference is deleted in Rancher and S3
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert_message = "The backup should't exist in the S3 bucket"
        assert backup_found is False, assert_message
    elif backup_mode == 'filesystem':
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            filename = backup_info["etcdbackupdata"][0]['filename']
            assert filename not in response, \
                "The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
    """kubectl-apply the given file in the namespace; return decoded output."""
    cmd = 'apply -f ' + file + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
    """kubectl-get the named resource in the namespace; return decoded output."""
    cmd = 'get ' + crd_name + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
    """kubectl-delete the given file in the namespace; return decoded output."""
    cmd = 'delete -f ' + file + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
    """Load the auth provider's nested-group fixture JSON from the resource
    directory and populate the module-level nested_group dict with its
    auth_info, users, group_dic and groups entries."""
    name = \
        os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
                     AUTH_PROVIDER.lower() + ".json")
    with open(name) as reader:
        auth_data = reader.read()
    raw = json.loads(auth_data).get("nested_group_info")
    nested_group["auth_info"] = raw.copy()
    nested_group["users"] = raw.get("users")
    # after popping "users", raw maps each group name to its membership info
    raw.pop("users")
    nested_group["group_dic"] = raw
    nested_group["groups"] = raw.keys()
def is_nested():
    """ check if the provided groups are nested groups,
    return True if at least one of the groups contains other groups
    """
    group_dic = nested_group["group_dic"]
    empty_count = sum(1 for members in group_dic.values()
                      if len(members) == 0)
    return empty_count < len(group_dic)
def get_group(nested=False):
    """ return a group or a nested group
    :param nested: when True, return a group that contains a sub-group with
        at least one direct user; otherwise a group with direct users
    """
    if nested:
        # return the name of a group that contains at least one other group
        for item in nested_group["groups"]:
            if len(nested_group["group_dic"].get(item).get("users")) == 0:
                # NOTE(review): no-op; 'continue' was probably intended here
                pass
            sub_groups = nested_group["group_dic"].get(item).get("groups")
            if len(sub_groups) == 0:
                # NOTE(review): no-op; 'continue' was probably intended here
                pass
            for g in sub_groups:
                if len(nested_group["group_dic"].get(g).get("users")) > 0:
                    return item
        assert False, "cannot find any valid nested group"
    else:
        # return the name of a group that has at least one direct user
        for group in nested_group["groups"]:
            if len(nested_group["group_dic"].get(group).get("users")) > 0:
                return group
        assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
    """ return the list of users in the group
    if nested is False, return the direct users in the group;
    otherwise, return all users including those from nested groups
    :param group: group name to look up in nested_group["group_dic"]
    :param nested: whether to recurse into sub-groups
    """
    def get_user_in_nested_group(group, source):
        # recursively collect the users of `group` and all its sub-groups
        if group == "":
            return []
        # copy the stored list: appending to it directly would mutate the
        # shared fixture data in nested_group across calls
        users = list(source["group_dic"].get(group).get("users"))
        for sub_group in source["group_dic"].get(group).get("groups"):
            temp = get_user_in_nested_group(sub_group, source)
            for user in temp:
                if user not in users:
                    users.append(user)
        return users
    if nested:
        users = get_user_in_nested_group(group, nested_group)
        assert len(users) > 0, "no user in the group"
    else:
        users = nested_group["group_dic"].get(group).get("users")
        assert users is not None, "no user in the group"
    print("group: {}, users: {}".format(group, users))
    return users
def get_a_group_and_a_user_not_in_it(nested=False):
    """ return a group or a nested group and a user that is not in the group"""
    for candidate in nested_group["groups"]:
        members = get_user_by_group(candidate, nested)
        outsiders = [u for u in nested_group["users"] if u not in members]
        if outsiders:
            print("group: {}, user not in it: {}".format(candidate,
                                                         outsiders[0]))
            return candidate, outsiders[0]
    assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
    """ get the group's principal id from the auth provider
    :param group_name: name of the group to search for
    :param token: API token used for the search request
    :param expected_status: HTTP status the search is expected to return
    :return: the principal id of the first matching group
    """
    headers = {'Authorization': 'Bearer ' + token}
    r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
                      json={'name': group_name,
                            'principalType': 'group',
                            'responseType': 'json'},
                      verify=False, headers=headers)
    assert r.status_code == expected_status
    return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
    """ login with the user account from the auth provider
    :return: the parsed JSON login response (which contains the token)
    """
    r = requests.post(login_url, json={
        'username': username,
        'password': password,
        'responseType': 'json',
    }, verify=False)
    assert r.status_code in [200, 201]
    return r.json()
def validate_service_discovery(workload, scale,
                               p_client=None, ns=None, testclient_pods=None):
    """Verify DNS-based service discovery: the workload's cluster-local DNS
    name must resolve, from each test-client pod, to the IPs of all of the
    workload's pods.
    :param workload: workload whose service entry is checked
    :param scale: expected number of pods backing the workload
    :param p_client: project client
    :param ns: namespace object (ns.id is used in the DNS name)
    :param testclient_pods: pods from which the DNS lookup is performed
    """
    expected_ips = []
    pods = p_client.list_pod(workloadId=workload["id"]).data
    assert len(pods) == scale
    for pod in pods:
        expected_ips.append(pod["status"]["podIp"])
    host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
    for pod in testclient_pods:
        validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
    """Return the project created for the auth tests."""
    return auth_rbac_data["project"]
def auth_get_namespace():
    """Return the namespace created for the auth tests."""
    return auth_rbac_data["namespace"]
def auth_get_user_token(username):
    """Return the token of the given auth-test user, or None if unknown."""
    user = auth_rbac_data["users"].get(username)
    return None if user is None else user.token
def add_role_to_user(user, role):
    """Assign the given cluster or project role to an auth-provider user,
    creating the shared auth project/namespace on first use, and record the
    user in auth_rbac_data.
    :param user: user object from the auth provider
    :param role: a project role (owner/member/read-only) or a cluster role
    """
    admin_client, cluster = get_global_admin_client_and_cluster()
    project = auth_get_project()
    ns = auth_get_namespace()
    if not (project and ns):
        project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
                                            random_test_name("p-test-auth"))
        auth_rbac_data["project"] = project
        auth_rbac_data["namespace"] = ns
    if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
        assign_members_to_project(admin_client, user, project, role)
    else:
        assign_members_to_cluster(admin_client, user, cluster, role)
    auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
    """ remove the project and namespace created for the AUTH tests,
    and delete each test user's cluster role template bindings"""
    client, cluster = get_global_admin_client_and_cluster()
    client.delete(auth_rbac_data["project"])
    auth_rbac_data["project"] = None
    # clear the namespace under the same key add_role_to_user stores it
    # (previously nulled "ns", leaving "namespace" pointing at the deleted
    # object so auth_get_namespace() returned stale data)
    auth_rbac_data["namespace"] = None
    for username, user in auth_rbac_data["users"].items():
        user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
        for crtb in user_crtbs:
            client.delete(crtb)
class WebsocketLogParse:
    """
    Receive messages from a websocket connection on a background thread and
    accumulate the decoded payloads; the accumulated text is exposed
    thread-safely through the last_message property.
    """
    def __init__(self):
        self.lock = Lock()          # guards _last_message
        self._last_message = ''     # concatenation of decoded messages
    def receiver(self, socket, skip):
        """
        run a thread to receive and save the message from the web socket
        :param socket: the socket connection
        :param skip: if True skip the first char of the received message
        """
        while socket.connected:
            try:
                data = socket.recv()
                # the message from the kubectl contains an extra char
                if skip:
                    data = data[1:]
                if len(data) < 5:
                    # NOTE(review): no-op guard; short frames were presumably
                    # meant to be skipped ('continue') -- kept as-is
                    pass
                data = base64.b64decode(data).decode()
                # 'with' releases the lock even if the append raises,
                # unlike the previous manual acquire()/release() pair
                with self.lock:
                    self._last_message += data
            except websocket.WebSocketConnectionClosedException:
                print("Connection closed")
                break
            except websocket.WebSocketProtocolException as wpe:
                print("Error: {}".format(wpe))
                break
    @staticmethod
    def start_thread(target, args):
        """Run target(*args) on a daemon thread; sleep briefly so it starts."""
        thread = Thread(target=target, args=args)
        thread.daemon = True
        thread.start()
        time.sleep(1)
    @property
    def last_message(self):
        """All decoded message text received so far."""
        return self._last_message
    @last_message.setter
    def last_message(self, value):
        with self.lock:
            self._last_message = value
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
    """Poll until no cluster with the given name is listed; raise
    AssertionError on timeout."""
    start = time.time()
    cluster = client.list_cluster(name=cluster_name).data
    cluster_count = len(cluster)
    while cluster_count != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for cluster to get deleted")
        time.sleep(.5)
        cluster = client.list_cluster(name=cluster_name).data
        cluster_count = len(cluster)
def create_connection(url, subprotocols):
    """
    create a websocket connection and check if it is connected
    :param url: the url to connect to
    :param subprotocols: the list of subprotocols
    :return: the connected websocket, authenticated via the R_SESS cookie
    """
    ws = websocket.create_connection(
        url=url,
        sslopt={"cert_reqs": ssl.CERT_NONE},  # test setups use self-signed certs
        subprotocols=subprotocols,
        timeout=10,
        cookie="R_SESS=" + USER_TOKEN
    )
    assert ws.connected, "failed to build the websocket"
    return ws
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
    """Poll the horizontal pod autoscaler until its state is "active";
    raise AssertionError on timeout.
    :return: the refreshed HPA object
    """
    start = time.time()
    hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
    assert len(hpalist) == 1
    hpa = hpalist[0]
    while hpa.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
        assert len(hpas) == 1
        hpa = hpas[0]
    return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
    """Create an NFS persistent volume plus a claim bound to it.
    :param client: project client used to create the claim
    :param ns: namespace for the claim
    :param nfs_ip: NFS server IP backing the volume
    :param cluster_client: cluster client used to create the volume
    :return: (pv_object, pvc_object), with the PVC waited into Bound state
    """
    pv_object = create_pv(cluster_client, nfs_ip)
    pvc_name = random_test_name("pvc")
    pvc_config = {"accessModes": ["ReadWriteOnce"],
                  "name": pvc_name,
                  "volumeId": pv_object.id,
                  "namespaceId": ns.id,
                  "storageClassId": "",
                  "resources": {"requests": {"storage": "10Gi"}}
                  }
    pvc_object = client.create_persistent_volume_claim(pvc_config)
    pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
    return pv_object, pvc_object
def create_pv(client, nfs_ip):
    """Create an NFS-backed 50Gi persistent volume and verify its fields.
    :param client: cluster client
    :param nfs_ip: IP address of the NFS server
    :return: the created persistent volume object
    """
    config = {
        "type": "persistentVolume",
        "accessModes": ["ReadWriteOnce"],
        "name": random_test_name("pv"),
        "nfs": {"readOnly": "false",
                "type": "nfsvolumesource",
                "path": NFS_SERVER_MOUNT_PATH,
                "server": nfs_ip},
        "capacity": {"storage": "50Gi"},
    }
    pv = client.create_persistent_volume(config)
    assert pv['capacity']['storage'] == '50Gi'
    assert pv['type'] == 'persistentVolume'
    return pv
def delete_resource_in_AWS_by_prefix(resource_prefix):
    """
    Delete all AWS resources whose names start with the given prefix:
    EC2 nodes, the known NLBs with their target groups, the multinode RDS
    database, and the qa.rancher.space Route53 records.
    :param resource_prefix: the prefix of resource name
    :return: None
    """
    # delete nodes of both local and custom clusters
    node_filter = [{
        'Name': 'tag:Name',
        'Values': [resource_prefix + "-*"]
    }]
    nodes = AmazonWebServices().get_nodes(filters=node_filter)
    if nodes is None:
        print("deleting the following instances: None")
    else:
        print("deleting the following instances: {}"
              .format([node.public_ip_address for node in nodes]))
        AmazonWebServices().delete_nodes(nodes)
    # delete load balancer and target groups
    tg_list = []
    lb_list = []
    lb_names = [resource_prefix + '-nlb',
                resource_prefix + '-multinode-nlb',
                resource_prefix + '-k3s-nlb',
                resource_prefix + '-internal-nlb']
    for name in lb_names:
        lb_arn = AmazonWebServices().get_lb(name)
        if lb_arn is not None:
            lb_list.append(lb_arn)
            res = AmazonWebServices().get_target_groups(lb_arn)
            tg_list.extend(res)
    print("deleting the following load balancers: {}".format(lb_list))
    print("deleting the following target groups: {}".format(tg_list))
    for lb in lb_list:
        AmazonWebServices().delete_lb(lb)
    for tg in tg_list:
        AmazonWebServices().delete_target_group(tg)
    # delete rds
    db_name = resource_prefix + "-multinode-db"
    print("deleting the database (if it exists): {}".format(db_name))
    AmazonWebServices().delete_db(db_name)
    # delete the route 53 record
    route53_names = [resource_prefix + ".qa.rancher.space.",
                     resource_prefix + "-internal.qa.rancher.space."]
    for name in route53_names:
        print("deleting the route53 record (if it exists): {}".format(name))
        AmazonWebServices().delete_route_53_record(name)
    print("deletion is done")
    return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
                               cluster):
    """Apply the OS-level prerequisites of the given CIS profile on each
    node, register the nodes into the cluster, then (for rke-cis-1.5) apply
    the default-allow network policy and the service account patch.
    :param aws_nodes: AWS node objects to configure and register
    :param profile: 'rke-cis-1.4' or 'rke-cis-1.5'
    :param node_roles: list of role lists, parallel to aws_nodes
    :param client: rancher API client
    :param cluster: cluster the nodes are registered into
    :return: the cluster once it reaches the expected state
    """
    i = 0
    if profile == 'rke-cis-1.4':
        for aws_node in aws_nodes:
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            if node_roles[i] == ["etcd"]:
                aws_node.execute_command("sudo useradd etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    elif profile == 'rke-cis-1.5':
        for aws_node in aws_nodes:
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            aws_node.execute_command("sudo sysctl -w "
                                     "kernel.keys.root_maxbytes=25000000")
            if node_roles[i] == ["etcd"]:
                # CIS 1.5 requires the etcd user/group with fixed ids
                aws_node.execute_command("sudo groupadd -g 52034 etcd")
                aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    time.sleep(5)
    cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under System project to get active
    time.sleep(20)
    if profile == 'rke-cis-1.5':
        create_kubeconfig(cluster)
        network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
        account_update_file = DATA_SUBDIR + "/account_update.yaml"
        items = execute_kubectl_cmd("get namespaces -A")["items"]
        all_ns = [item["metadata"]["name"] for item in items]
        for ns in all_ns:
            execute_kubectl_cmd("apply -f {0} -n {1}".
                                format(network_policy_file, ns))
        namespace = ["default", "kube-system"]
        for ns in namespace:
            execute_kubectl_cmd('patch serviceaccount default'
                                ' -n {0} -p "$(cat {1})"'.
                                format(ns, account_update_file))
    return cluster
def get_node_details(cluster, client):
    """
    lists the nodes from the cluster. This cluster has only 1 node.
    NOTE(review): if no worker node exists, the loop falls through and the
    last listed node is returned (loop-variable leak) -- callers appear to
    rely on single-node clusters; confirm before reusing elsewhere.
    :return: client and node object
    """
    create_kubeconfig(cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    for node in nodes:
        if node.worker:
            break
    return client, node
def create_service_account_configfile():
    """Create a service account in the default namespace and write a
    kubeconfig file (<name>.yaml next to this file) that authenticates as
    that service account against a control-plane node's API endpoint.
    :return: the generated service account name
    """
    client, cluster = get_user_client_and_cluster()
    create_kubeconfig(cluster)
    name = random_name()
    # create a service account
    execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
    # get the ca and token
    res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
    secret_name = ""
    for item in res.split("\n"):
        if name in item:
            secret_name = item.split("/")[1]
            break
    res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
    ca = res["data"]["ca.crt"]
    token = res["data"]["token"]
    token = base64.b64decode(token).decode()
    server = None
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.controlPlane:
            server = "https://" + node.externalIpAddress + ":6443"
            break
    assert server is not None, 'failed to get the public ip of control plane'
    config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
  cluster:
    server: {server}
    certificate-authority-data: {ca}
contexts:
- name: default-context
  context:
    cluster: test-cluster
    namespace: default
    user: test-user
current-context: default-context
users:
- name: test-user
  user:
    token: {token}
"""
    config = config.format(server=server, ca=ca, token=token)
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               name + ".yaml")
    with open(config_file, "w") as file:
        file.write(config)
    return name
def rbac_test_file_reader(file_path=None):
    """
    This method generates test cases from an input file and return the result
    that can be used to parametrize pytest cases
    :param file_path: the path to the JSON file for test cases
    :return: a list of tuples of
    (cluster_role, command, authorization, service account name)
    """
    # RBAC-v2 tests are opt-in: return no cases when the flag is off
    if test_rbac_v2 == "False":
        return []
    if file_path is None:
        pytest.fail("no file is provided")
    with open(file_path) as reader:
        test_cases = json.loads(reader.read().replace("{resource_root}",
                                                      DATA_SUBDIR))
    output = []
    for cluster_role, checks in test_cases.items():
        # create a service account for each role
        name = create_service_account_configfile()
        # create the cluster role binding
        cmd = "create clusterrolebinding {} " \
              "--clusterrole {} " \
              "--serviceaccount {}".format(name, cluster_role,
                                           "default:" + name)
        execute_kubectl_cmd(cmd, json_out=False)
        for command in checks["should_pass"]:
            output.append((cluster_role, command, True, name))
        for command in checks["should_fail"]:
            output.append((cluster_role, command, False, name))
    return output
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
    """
    This methods creates a new service account to validate the permissions
    both before and after creating the cluster role binding between the
    service account and the cluster role
    :param cluster_role: the cluster role
    :param command: the kubectl command to run
    :param authorization: if the service account has the permission: True/False
    :param name: the name of the service account, cluster role binding, and the
    kubeconfig file
    """
    # kubeconfig written earlier by create_service_account_configfile()
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               name + ".yaml")
    result = execute_kubectl_cmd(command,
                                 json_out=False,
                                 kubeconfig=config_file,
                                 stderr=True).decode('utf_8')
    if authorization:
        assert "Error from server (Forbidden)" not in result, \
            "{} should have the authorization to run {}".format(cluster_role,
                                                                command)
    else:
        assert "Error from server (Forbidden)" in result, \
            "{} should NOT have the authorization to run {}".format(
                cluster_role, command)
|
test_examples.py | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from tempfile import TemporaryDirectory
from threading import Thread
import unittest
import numpy as np
import tensorflow as tf
from tensorforce import Agent, Environment, Runner
from test.unittest_base import UnittestBase
class TestExamples(UnittestBase, unittest.TestCase):
agent = dict(config=dict(eager_mode=True, create_debug_assertions=True, tf_log_level=20))
    def test_quickstart(self):
        """Run the documented quickstart example end-to-end and verify the
        saver checkpoints and summarizer event file are produced.
        NOTE(review): episode counts are reduced versus the inline example
        comments (e.g. "Train for 200 episodes" but num_episodes=20),
        presumably to keep the test fast; the comments quote the published
        example -- confirm.
        """
        self.start_tests(name='quickstart')
        with TemporaryDirectory() as saver_directory, TemporaryDirectory() as summarizer_directory:
            # ====================
            # OpenAI-Gym environment specification
            environment = dict(environment='gym', level='CartPole-v1')
            # or: environment = Environment.create(
            #     environment='gym', level='CartPole-v1', max_episode_timesteps=500)
            # PPO agent specification
            agent = dict(
                agent='ppo',
                # Automatically configured network
                network='auto',
                # PPO optimization parameters
                batch_size=10, update_frequency=2, learning_rate=3e-4, multi_step=10,
                subsampling_fraction=0.33,
                # Reward estimation
                likelihood_ratio_clipping=0.2, discount=0.99, predict_terminal_values=False,
                # Baseline network and optimizer
                baseline=dict(type='auto', size=32, depth=1),
                baseline_optimizer=dict(optimizer='adam', learning_rate=1e-3, multi_step=10),
                # Regularization
                l2_regularization=0.0, entropy_regularization=0.0,
                # Preprocessing
                state_preprocessing='linear_normalization', reward_preprocessing=None,
                # Exploration
                exploration=0.0, variable_noise=0.0,
                # Default additional config values
                config=None,
                # Save model every 10 updates and keep the 5 most recent checkpoints
                saver=dict(directory=saver_directory, frequency=10, max_checkpoints=5),
                # Log all available Tensorboard summaries
                summarizer=dict(directory=summarizer_directory, summaries='all'),
                # Do not record agent-environment interaction trace
                recorder=None
            )
            # or: Agent.create(agent='ppo', environment=environment, ...)
            # with additional argument "environment" and, if applicable, "parallel_interactions"
            # Initialize the runner
            runner = Runner(agent=agent, environment=environment, max_episode_timesteps=500)
            # Train for 200 episodes
            runner.run(num_episodes=20)
            runner.close()
            # plus agent.close() and environment.close() if created separately
            # ====================
            files = set(os.listdir(path=saver_directory))
            self.assertTrue(files == {
                'agent.json', 'agent-0.data-00000-of-00001', 'agent-0.index',
                'agent-10.data-00000-of-00001', 'agent-10.index', 'checkpoint'
            })
            directories = os.listdir(path=summarizer_directory)
            self.assertEqual(len(directories), 1)
            files = os.listdir(path=os.path.join(summarizer_directory, directories[0]))
            self.assertEqual(len(files), 1)
            self.assertTrue(files[0].startswith('events.out.tfevents.'))
        self.finished_test()
def test_act_observe(self):
    """Documentation example: training via explicit act/observe calls,
    followed by an independent-act evaluation loop.

    The code between the ``====`` markers mirrors the act-observe snippet
    from the docs; episode counts are shortened for test speed.
    """
    self.start_tests(name='act-observe')
    # ====================
    environment = Environment.create(environment='benchmarks/configs/cartpole.json')
    agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment)
    # Train for 100 episodes (shortened to 10 here for test speed)
    for episode in range(10):
        # Episode using act and observe
        states = environment.reset()
        terminal = False
        sum_reward = 0.0
        num_updates = 0
        while not terminal:
            actions = agent.act(states=states)
            states, terminal, reward = environment.execute(actions=actions)
            # observe() returns the number of performed updates
            num_updates += agent.observe(terminal=terminal, reward=reward)
            sum_reward += reward
        print('Episode {}: return={} updates={}'.format(episode, sum_reward, num_updates))
    # Evaluate for 100 episodes (shortened to 10 here for test speed)
    sum_rewards = 0.0
    for _ in range(10):
        states = environment.reset()
        internals = agent.initial_internals()
        terminal = False
        while not terminal:
            # independent=True: evaluation act, no experience is collected
            actions, internals = agent.act(
                states=states, internals=internals, independent=True, deterministic=True
            )
            states, terminal, reward = environment.execute(actions=actions)
            sum_rewards += reward
    print('Mean evaluation return:', sum_rewards / 100.0)
    # Close agent and environment
    agent.close()
    environment.close()
    # ====================
    self.finished_test()
def test_act_experience_update(self):
    """Documentation example: independent acting, feeding recorded experience
    via agent.experience(), and explicit agent.update() calls.

    Episode counts are shortened for test speed.
    """
    self.start_tests(name='act-experience-update')
    # ====================
    environment = Environment.create(environment='benchmarks/configs/cartpole.json')
    agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment)
    # Train for 100 episodes (shortened to 10 here)
    for episode in range(10):
        # Record episode experience
        episode_states = list()
        episode_internals = list()
        episode_actions = list()
        episode_terminal = list()
        episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
        states = environment.reset()
        internals = agent.initial_internals()
        terminal = False
        sum_reward = 0.0
        while not terminal:
            episode_states.append(states)
            episode_internals.append(internals)
            actions, internals = agent.act(states=states, internals=internals, independent=True)
            episode_actions.append(actions)
            states, terminal, reward = environment.execute(actions=actions)
            episode_terminal.append(terminal)
            episode_reward.append(reward)
            sum_reward += reward
        print('Episode {}: {}'.format(episode, sum_reward))
        # Feed recorded experience to agent
        agent.experience(
            states=episode_states, internals=episode_internals, actions=episode_actions,
            terminal=episode_terminal, reward=episode_reward
        )
        # Perform update
        agent.update()
    # Evaluate for 100 episodes (shortened to 10 here)
    sum_rewards = 0.0
    for _ in range(10):
        states = environment.reset()
        internals = agent.initial_internals()
        terminal = False
        while not terminal:
            actions, internals = agent.act(
                states=states, internals=internals, independent=True, deterministic=True
            )
            states, terminal, reward = environment.execute(actions=actions)
            sum_rewards += reward
    print('Mean evaluation return:', sum_rewards / 100.0)
    # Close agent and environment
    agent.close()
    environment.close()
    # ====================
    self.finished_test()
def test_export_saved_model(self):
    """Documentation example: export a trained agent as a TensorFlow
    SavedModel and serve it without Tensorforce (plain tf.saved_model API).
    """
    self.start_tests(name='export-saved-model')
    # ====================
    # Batch inputs: the SavedModel act-function expects a leading batch axis
    def batch(x):
        return np.expand_dims(x, axis=0)

    # Unbatch outputs
    def unbatch(x):
        if isinstance(x, tf.Tensor):  # TF tensor to NumPy array
            x = x.numpy()
        if x.shape == (1,):  # Singleton array to Python value
            return x.item()
        else:
            return np.squeeze(x, axis=0)

    # Apply function to leaf values in nested dict
    # (required for nested states/actions)
    def recursive_map(function, dictionary):
        mapped = dict()
        for key, value in dictionary.items():
            if isinstance(value, dict):
                mapped[key] = recursive_map(function, value)
            else:
                mapped[key] = function(value)
        return mapped
    # ====================

    with TemporaryDirectory() as directory:
        # ====================
        # Train agent
        environment = Environment.create(environment='benchmarks/configs/cartpole.json')
        runner = Runner(agent='benchmarks/configs/ppo.json', environment=environment)
        runner.run(num_episodes=10)
        # Save agent SavedModel
        runner.agent.save(directory=directory, format='saved-model')
        runner.close()
        # Model serving, potentially using different programming language etc
        # (For regular model saving and loading within Python, see save_load_agent.py example)
        # Load agent SavedModel
        agent = tf.saved_model.load(export_dir=directory)
        # Evaluate for 100 episodes (shortened to 10 here)
        sum_rewards = 0.0
        for _ in range(10):
            states = environment.reset()
            # Required in case of internal states:
            # internals = agent.initial_internals()
            # internals = recursive_map(batch, internals)
            terminal = False
            while not terminal:
                states = batch(states)
                # Required in case of nested states:
                # states = recursive_map(batch, states)
                # Action mask over CartPole's two discrete actions (all allowed)
                auxiliaries = dict(mask=np.ones(shape=(1, 2), dtype=bool))
                deterministic = True
                actions = agent.act(states, auxiliaries, deterministic)
                # Required in case of internal states:
                # actions_internals = agent.act(states, internals, auxiliaries, deterministic)
                # actions, internals = actions_internals['actions'], actions_internals['internals']
                actions = unbatch(actions)
                # Required in case of nested actions:
                # actions = recursive_map(unbatch, actions)
                states, terminal, reward = environment.execute(actions=actions)
                sum_rewards += reward
        print('Mean evaluation return:', sum_rewards / 100.0)
        environment.close()
        # ====================
    self.finished_test()
def test_parallelization(self):
    """Documentation example: three flavors of parallel environment execution —
    batched agent calls, multiprocessing workers, and socket server/client.
    """
    self.start_tests(name='parallelization')
    # ====================
    agent = 'benchmarks/configs/ppo.json'
    environment = 'benchmarks/configs/cartpole.json'
    runner = Runner(agent=agent, environment=environment, num_parallel=4)
    # Batch act/observe calls to agent (otherwise essentially equivalent to single environment)
    runner.run(num_episodes=10, batch_agent_calls=True)
    runner.close()
    # ====================
    agent = 'benchmarks/configs/ppo.json'
    environment = 'benchmarks/configs/cartpole.json'
    runner = Runner(agent=agent, environment=environment, num_parallel=4, remote='multiprocessing')
    runner.run(num_episodes=10)  # optional: batch_agent_calls=True
    runner.close()
    # ====================
    agent = 'benchmarks/configs/ppo.json'
    environment = 'benchmarks/configs/cartpole.json'

    # Each server thread hosts one environment instance on its own port;
    # closes over `environment` defined above.
    def server(port):
        Environment.create(environment=environment, remote='socket-server', port=port)

    server1 = Thread(target=server, kwargs=dict(port=65432))
    server2 = Thread(target=server, kwargs=dict(port=65433))
    server1.start()
    server2.start()
    # Client runner connects to consecutive ports starting at `port`
    runner = Runner(
        agent=agent, num_parallel=2, remote='socket-client', host='127.0.0.1', port=65432
    )
    runner.run(num_episodes=10)  # optional: batch_agent_calls=True
    runner.close()
    server1.join()
    server2.join()
    # ====================
    self.finished_test()
def test_record_and_pretrain(self):
    """Documentation example: record traces three ways (agent recorder config,
    custom act-function recorder, manual npz writing), then pretrain a fresh
    agent from the recorded traces and check the resulting trace files.
    """
    self.start_tests(name='record-and-pretrain')
    with TemporaryDirectory() as directory:
        # ====================
        # Start recording traces after 80 episodes -- by then, the environment is solved
        # (shortened here: start=8 of 10 episodes)
        runner = Runner(
            agent=dict(
                agent='benchmarks/configs/ppo.json',
                recorder=dict(directory=directory, start=8)
            ), environment='benchmarks/configs/cartpole.json'
        )
        runner.run(num_episodes=10)
        runner.close()
        # ====================
        # Trivial custom act function: push cart toward upright pole
        def fn_act(states):
            return int(states[2] < 0.0)

        # Record 20 episodes (shortened to 2 here)
        runner = Runner(
            agent=dict(agent=fn_act, recorder=dict(directory=directory)),
            environment='benchmarks/configs/cartpole.json'
        )
        # or: agent = Agent.create(agent=fn_act, recorder=dict(directory=directory))
        runner.run(num_episodes=2)
        runner.close()
        # ====================
        # Train an agent until solved, then manually record evaluation episodes
        environment = Environment.create(environment='benchmarks/configs/cartpole.json')
        agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment)
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=8)
        runner.close()
        # Record 20 episodes (shortened to 2 here, numbered 2-3)
        for episode in range(2, 4):
            # Record episode experience
            episode_states = list()
            episode_actions = list()
            episode_terminal = list()
            episode_reward = list()
            # Evaluation episode
            states = environment.reset()
            terminal = False
            while not terminal:
                episode_states.append(states)
                actions = agent.act(states=states, independent=True, deterministic=True)
                episode_actions.append(actions)
                states, terminal, reward = environment.execute(actions=actions)
                episode_terminal.append(terminal)
                episode_reward.append(reward)
            # Write recorded episode trace to npz file
            np.savez_compressed(
                file=os.path.join(directory, 'trace-{:09d}.npz'.format(episode)),
                states=np.stack(episode_states, axis=0),
                actions=np.stack(episode_actions, axis=0),
                terminal=np.stack(episode_terminal, axis=0),
                reward=np.stack(episode_reward, axis=0)
            )
        # ====================
        # Pretrain a new agent on the recorded traces: for 30 iterations, feed the
        # experience of one episode to the agent and subsequently perform one update
        environment = Environment.create(environment='benchmarks/configs/cartpole.json')
        agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment)
        agent.pretrain(directory=directory, num_iterations=30, num_traces=1, num_updates=1)
        # Evaluate the pretrained agent
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=10, evaluation=True)
        runner.close()
        # Close agent and environment
        agent.close()
        environment.close()
        # ====================
        # Performance test: pretrain from checked-in traces; expect solved policy
        environment = Environment.create(environment='benchmarks/configs/cartpole.json')
        agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment)
        agent.pretrain(
            directory='test/data/ppo-traces', num_iterations=30, num_traces=1, num_updates=1
        )
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=10, evaluation=True)
        self.assertTrue(
            all(episode_reward == 500.0 for episode_reward in runner.episode_rewards)
        )
        runner.close()
        agent.close()
        environment.close()

        # Verify the trace files recorded above (episodes 0-3 plus 8-9)
        files = sorted(os.listdir(path=directory))
        self.assertEqual(len(files), 6)
        self.assertTrue(all(
            file.startswith('trace-') and file.endswith('0000000{}.npz'.format(n))
            for n, file in zip([0, 1, 2, 3, 8, 9], files)
        ))
    self.finished_test()
def test_save_load_agent(self):
    """Documentation example: periodic checkpoint saving via the saver config,
    explicit numpy-format saving, and reloading the agent from both formats.
    """
    self.start_tests(name='save-load-agent')
    with TemporaryDirectory() as checkpoint_directory, TemporaryDirectory() as numpy_directory:
        # ====================
        # OpenAI-Gym environment initialization
        environment = Environment.create(environment='benchmarks/configs/cartpole.json')
        # PPO agent initialization
        agent = Agent.create(
            agent='benchmarks/configs/ppo.json', environment=environment,
            # Option 1: Saver - save agent periodically every 10 updates
            # and keep the 5 most recent checkpoints
            saver=dict(directory=checkpoint_directory, frequency=1, max_checkpoints=5),
        )
        # Runner initialization
        runner = Runner(agent=agent, environment=environment)
        # Training
        runner.run(num_episodes=10)
        runner.close()
        # Option 2: Explicit save
        # (format: 'numpy' or 'hdf5' store only weights, 'checkpoint' stores full TensorFlow model,
        # agent argument saver, specified above, uses 'checkpoint')
        agent.save(directory=numpy_directory, format='numpy', append='episodes')
        # Close agent separately, since created separately
        agent.close()
        # Load agent TensorFlow checkpoint
        agent = Agent.load(directory=checkpoint_directory, format='checkpoint', environment=environment)
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=10, evaluation=True)
        runner.close()
        agent.close()
        # Load agent NumPy weights
        agent = Agent.load(directory=numpy_directory, format='numpy', environment=environment)
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=10, evaluation=True)
        runner.close()
        agent.close()
        # Close environment separately, since created separately
        environment.close()
        # ====================
    self.finished_test()
def test_temperature_controller(self):
    """Documentation example (tutorial notebook): a thermostat environment.

    Walks through response/reward exploration with pandas/matplotlib, a custom
    Environment subclass, agent creation, an untrained rollout, training, and
    a trained rollout — each section delimited by ``====`` markers.
    """
    self.start_tests(name='temperature-controller')
    # ====================
    import pandas as pd
    import matplotlib.pyplot as plt
    import numpy as np
    import math

    ## Compute the response for a given action and current temperature
    ## (first-order exponential approach toward the action value, time constant tau)
    def respond(action, current_temp, tau):
        return action + (current_temp - action) * math.exp(-1.0/tau)

    ## Actions of a series of on, then off
    sAction = pd.Series(np.array([1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0]))
    sResponse = np.zeros(sAction.size)

    ## Update the response with the response to the action
    for i in range(sAction.size):
        ## Get last response
        if i == 0:
            last_response = 0
        else:
            last_response = sResponse[i - 1]
        sResponse[i] = respond(sAction[i], last_response, 3.0)

    ## Assemble and plot
    df = pd.DataFrame(list(zip(sAction, sResponse)), columns=['action', 'response'])
    df.plot()
    # ====================

    ## Reward shaping: 0 inside the [0.4, 0.6] band, else negative distance to band
    def reward(temp):
        delta = abs(temp - 0.5)
        if delta < 0.1:
            return 0.0
        else:
            return -delta + 0.1

    temps = [x * 0.01 for x in range(100)]
    rewards = [reward(x) for x in temps]
    fig=plt.figure(figsize=(12, 4))
    plt.scatter(temps, rewards)
    plt.xlabel('Temperature')
    plt.ylabel('Reward')
    plt.title('Reward vs. Temperature')
    # ====================
    ###-----------------------------------------------------------------------------
    ## Imports (local: tutorial snippet imports shadow any module-level names)
    from tensorforce.environments import Environment
    from tensorforce.agents import Agent

    ###-----------------------------------------------------------------------------
    ### Environment definition
    class ThermostatEnvironment(Environment):
        """This class defines a simple thermostat environment. It is a room with
        a heater, and when the heater is on, the room temperature will approach
        the max heater temperature (usually 1.0), and when off, the room will
        decay to a temperature of 0.0. The exponential constant that determines
        how fast it approaches these temperatures over timesteps is tau.
        """
        def __init__(self):
            ## Some initializations. Will eventually parameterize this in the constructor.
            self.tau = 3.0
            self.current_temp = np.random.random(size=(1,))
            super().__init__()

        def states(self):
            # Single float in [0, 1]: the current room temperature
            return dict(type='float', shape=(1,), min_value=0.0, max_value=1.0)

        def actions(self):
            """Action 0 means no heater, temperature approaches 0.0. Action 1 means
            the heater is on and the room temperature approaches 1.0.
            """
            return dict(type='int', num_values=2)

        # Optional, should only be defined if environment has a natural maximum
        # episode length
        def max_episode_timesteps(self):
            return super().max_episode_timesteps()

        # Optional
        def close(self):
            super().close()

        def reset(self):
            """Reset state.
            """
            # state = np.random.random(size=(1,))
            self.timestep = 0
            self.current_temp = np.random.random(size=(1,))
            return self.current_temp

        def response(self, action):
            """Respond to an action. When the action is 1, the temperature
            exponentially approaches 1.0. When the action is 0, the current
            temperature decays towards 0.0.
            """
            return action + (self.current_temp - action) * math.exp(-1.0 / self.tau)

        def reward_compute(self):
            """ The reward here is 0 if the current temp is between 0.4 and 0.6,
            else it is distance the temp is away from the 0.4 or 0.6 boundary.
            Return the value within the numpy array, not the numpy array.
            """
            delta = abs(self.current_temp - 0.5)
            if delta < 0.1:
                return 0.0
            else:
                return -delta[0] + 0.1

        def execute(self, actions):
            ## Check the action is either 0 or 1 -- heater on or off.
            assert actions == 0 or actions == 1
            ## Increment timestamp
            self.timestep += 1
            ## Update the current_temp
            self.current_temp = self.response(actions)
            ## Compute the reward
            reward = self.reward_compute()
            ## The only way to go terminal is to exceed max_episode_timestamp.
            ## terminal == False means episode is not done
            ## terminal == True means it is done.
            terminal = False
            return self.current_temp, terminal, reward
    ###-----------------------------------------------------------------------------
    ### Create the environment
    ### - Tell it the environment class
    ### - Set the max timestamps that can happen per episode
    # NOTE(review): the duplicated assignment target below is redundant but harmless
    environment = environment = Environment.create(
        environment=ThermostatEnvironment,
        max_episode_timesteps=100)
    # ====================
    agent = Agent.create(
        agent='tensorforce', environment=environment, update=64,
        optimizer=dict(optimizer='adam', learning_rate=1e-3), objective='policy_gradient',
        reward_estimation=dict(horizon=1)
    )
    # ====================
    ### Initialize (untrained rollout starting from temperature 0.5)
    environment.reset()
    ## Creation of the environment via Environment.create() creates
    ## a wrapper class around the original Environment defined here.
    ## That wrapper mainly keeps track of the number of timesteps.
    ## In order to alter the attributes of your instance of the original
    ## class, like to set the initial temp to a custom value, like here,
    ## you need to access the `environment` member of this wrapped class.
    ## That is why you see the way to set the current_temp like below.
    environment.current_temp = np.array([0.5])
    states = environment.current_temp
    internals = agent.initial_internals()
    terminal = False

    ### Run an episode
    temp = [environment.current_temp[0]]
    while not terminal:
        actions, internals = agent.act(states=states, internals=internals, independent=True)
        states, terminal, reward = environment.execute(actions=actions)
        temp += [states[0]]

    ### Plot the run
    plt.figure(figsize=(12, 4))
    ax=plt.subplot()
    ax.set_ylim([0.0, 1.0])
    plt.plot(range(len(temp)), temp)
    plt.hlines(y=0.4, xmin=0, xmax=99, color='r')
    plt.hlines(y=0.6, xmin=0, xmax=99, color='r')
    plt.xlabel('Timestep')
    plt.ylabel('Temperature')
    plt.title('Temperature vs. Timestep')
    plt.show()
    # Train for 200 episodes (shortened to 10 here)
    for _ in range(10):
        states = environment.reset()
        terminal = False
        while not terminal:
            actions = agent.act(states=states)
            states, terminal, reward = environment.execute(actions=actions)
            agent.observe(terminal=terminal, reward=reward)
    # ====================
    ### Initialize (trained rollout starting from temperature 1.0)
    environment.reset()
    ## Creation of the environment via Environment.create() creates
    ## a wrapper class around the original Environment defined here.
    ## That wrapper mainly keeps track of the number of timesteps.
    ## In order to alter the attributes of your instance of the original
    ## class, like to set the initial temp to a custom value, like here,
    ## you need to access the `environment` member of this wrapped class.
    ## That is why you see the way to set the current_temp like below.
    environment.current_temp = np.array([1.0])
    states = environment.current_temp
    internals = agent.initial_internals()
    terminal = False

    ### Run an episode
    temp = [environment.current_temp[0]]
    while not terminal:
        actions, internals = agent.act(states=states, internals=internals, independent=True)
        states, terminal, reward = environment.execute(actions=actions)
        temp += [states[0]]

    ### Plot the run
    plt.figure(figsize=(12, 4))
    ax=plt.subplot()
    ax.set_ylim([0.0, 1.0])
    plt.plot(range(len(temp)), temp)
    plt.hlines(y=0.4, xmin=0, xmax=99, color='r')
    plt.hlines(y=0.6, xmin=0, xmax=99, color='r')
    plt.xlabel('Timestep')
    plt.ylabel('Temperature')
    plt.title('Temperature vs. Timestep')
    plt.show()
    # ====================
    self.finished_test()
|
email.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : flask-mega-tutorial
# @Author : wangc
# @CreateTime : 2019/5/6 9:55
# @File : email
from threading import Thread
from flask_mail import Message
from flask import current_app
from app.extensions import mail
def send_async_email(app, msg):
    """Send *msg* from a worker thread inside *app*'s application context.

    Flask-Mail needs an active app context for configuration; the caller must
    pass the real app object (not the current_app proxy), since the proxy is
    not usable outside the request thread.
    """
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body, attachments=None, sync=False):
    """
    Send an email, either synchronously or via a background thread.

    :param subject: subject line
    :param sender: sender address
    :param recipients: list of recipient addresses
    :param text_body: plain-text body
    :param html_body: HTML body
    :param attachments: optional iterable of attachment argument tuples,
        each unpacked into ``Message.attach(*attachment)``
    :param sync: if True, send in the current thread; otherwise send on a
        worker thread
    :return: None
    """
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    # Bug fix: attachments defaults to None, which is not iterable; previously
    # every call without attachments raised TypeError.
    if attachments:
        for attachment in attachments:
            msg.attach(*attachment)
    if sync:
        mail.send(msg)
    else:
        # current_app is a context-local proxy; hand the real app object to
        # the worker thread so it can push its own app context.
        Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
|
keep_alive.py | from flask import Flask
from threading import Thread
# Minimal keep-alive web app (empty import name is fine for a single module)
app = Flask('')

@app.route('/')
def home():
    """Health-check endpoint confirming the web server (and bot) are up."""
    return "Webserver OK, Discord Bot OK"
def run():
    """Run the Flask app on all interfaces, port 8080 (blocking call)."""
    app.run(host='0.0.0.0',port=8080)
def keep_alive():
    """Launch the keep-alive web server on a background thread and return."""
    Thread(target=run).start()
|
spinner.py | """
Date : 2018. 9. 1
Author : Jiwoo Park
"""
import time
import threading
import itertools
import sys
class Spinner():
    """Console activity indicator animated on a background thread.

    Call start() to begin spinning and stop() to end; each instance is
    single-use, since its worker thread can only be started once.
    """
    # Endless cycle of the four spinner frames.
    spinner_char = itertools.cycle(['-','/','|','\\'])

    def __init__(self):
        # Event signalling the spin loop to terminate.
        self.stop_running = threading.Event()
        self.spin_thread = threading.Thread(target=self.init_spin)

    def start(self):
        """Begin the animation."""
        self.spin_thread.start()

    def stop(self):
        """Signal the loop to finish and wait for the worker thread."""
        self.stop_running.set()
        self.spin_thread.join()

    def init_spin(self):
        """Worker loop: draw a frame, pause, erase it — until stopped."""
        while True:
            if self.stop_running.is_set():
                break
            frame = next(self.spinner_char)
            sys.stdout.write(frame)
            sys.stdout.flush()
            time.sleep(0.25)
            sys.stdout.write('\b')
def spinner(func):
    """Decorator: show a console spinner while *func* runs.

    Bug fixes vs. the original:
    - a fresh Spinner is created per call, because a threading.Thread can
      only be started once (the old shared instance made a second call to
      the decorated function raise RuntimeError);
    - arguments and the return value are forwarded to/from *func*;
    - the spinner is always stopped, even if *func* raises (try/finally).
    """
    def inner(*args, **kwargs):
        spin = Spinner()
        spin.start()
        try:
            return func(*args, **kwargs)
        finally:
            spin.stop()
    return inner
@spinner
def do_work():
    """Demo task: sleep for 3 seconds while the spinner animates."""
    time.sleep(3)
if __name__ == "__main__":
do_work()
|
wiiremote.py | # -*- coding: utf-8 -*-
import cwiid
import threading
import time
class WiiRemote:
def __init__(self, n):
self.btn1 = False
self.btn2 = False
self.btnA = False
self.btnB = False
self.btnC = False
self.btnZ = False
self.btnUp = False
self.btnDown = False
self.btnLeft = False
self.btnRight = False
self.id = id
self.active = True
self.wm = None
self.stickH = 0
self.stickV = 0
# Connection à la manette Wii
print "Simultaneously press Wii remote buttons 1 and 2 now"
i = 1
while not self.wm:
try:
self.wm = cwiid.Wiimote()
except RuntimeError:
if i > 10:
quit()
break
print "Failed to connect to Wii remote"
print "Tentative " + str(i)
i += 1
print "Wii remote successfully connected"
self.wm.led = n
self.wm.rumble = True
time.sleep(.2)
self.wm.rumble = False
def _robotRemote(self, freq):
self.wm.rpt_mode = cwiid.RPT_BTN | cwiid.RPT_ACC | cwiid.RPT_NUNCHUK
nunHRange = 222. - 22.
nunHCenter = 122.
nunVRange = 231. - 38.
nunVCenter = 134.
while self.active:
buttons = self.wm.state['buttons']
nunAcc = self.wm.state['nunchuk']['acc']
nunButtons = self.wm.state['nunchuk']['buttons']
nunStick = self.wm.state['nunchuk']['stick']
nunStickH, nunStickV = nunStick
self.stickH = (float(nunStickH) - nunHCenter) / nunHRange
self.stickV = (float(nunStickV) - nunVCenter) / nunVRange
if buttons & cwiid.BTN_A:
self.btnA = True
else:
self.btnA = False
if nunButtons & cwiid.NUNCHUK_BTN_Z:
self.btnZ = True
else:
self.btnZ = False
time.sleep(1 / freq)
def _release(self):
self.active = False
print "Disconnecting Wii remote\n"
self.wm.rumble = True
time.sleep(.2)
self.wm.rumble = False
self.wm.led = 0
def robotRemote(self, freq):
thread1 = threading.Thread(target = self._robotRemote, args = [freq])
thread1.start()
def release(self):
if self.active:
thread2 = threading.Thread(target = self._release, args = [])
thread2.start()
def setLed(self, led):
self.wm.led = led
def getLed(self):
return self.wm.led
|
_app.py | import select
import sys
import threading
import time
import traceback
import six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from ._logging import *
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
__all__ = ["WebSocketApp"]
class WebSocketApp(object):
    """
    Higher level of APIs are provided.
    The interface is like JavaScript WebSocket object.
    """
    def __init__(self, url, header=None,
                 on_open=None, on_message=None, on_error=None,
                 on_close=None, on_ping=None, on_pong=None,
                 on_cont_message=None,
                 keep_running=True, get_mask_key=None, cookie=None,
                 subprotocols=None,
                 on_data=None):
        """
        url: websocket url.
        header: custom header for websocket handshake.
        on_open: callable object which is called at opening websocket.
          this function has one argument. The argument is this class object.
        on_message: callable object which is called when received data.
         on_message has 2 arguments.
         The 1st argument is this class object.
         The 2nd argument is utf-8 string which we get from the server.
        on_error: callable object which is called when we get error.
         on_error has 2 arguments.
         The 1st argument is this class object.
         The 2nd argument is exception object.
        on_close: callable object which is called when closed the connection.
         this function has one argument. The argument is this class object.
        on_cont_message: callback object which is called when receive continued
         frame data.
         on_cont_message has 3 arguments.
         The 1st argument is this class object.
         The 2nd argument is utf-8 string which we get from the server.
         The 3rd argument is continue flag. if 0, the data continue
         to next frame data
        on_data: callback object which is called when a message received.
          This is called before on_message or on_cont_message,
          and then on_message or on_cont_message is called.
          on_data has 4 argument.
          The 1st argument is this class object.
          The 2nd argument is utf-8 string which we get from the server.
          The 3rd argument is data type. ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be came.
          The 4th argument is continue flag. if 0, the data continue
        keep_running: a boolean flag indicating whether the app's main loop
          should keep running, defaults to True
        get_mask_key: a callable to produce new mask keys,
          see the WebSocket.set_mask_key's docstring for more information
        subprotocols: array of available sub protocols. default is None.
        """
        self.url = url
        self.header = header if header is not None else []
        self.cookie = cookie
        self.on_open = on_open
        self.on_message = on_message
        self.on_data = on_data
        self.on_error = on_error
        self.on_close = on_close
        self.on_ping = on_ping
        self.on_pong = on_pong
        self.on_cont_message = on_cont_message
        self.keep_running = keep_running
        self.get_mask_key = get_mask_key
        self.sock = None
        self.last_ping_tm = 0
        self.last_pong_tm = 0
        self.subprotocols = subprotocols

    def send(self, data, opcode=ABNF.OPCODE_TEXT):
        """
        send message.
        data: message to send. If you set opcode to OPCODE_TEXT,
              data must be utf-8 string or unicode.
        opcode: operation code of data. default is OPCODE_TEXT.
        """
        if not self.sock or self.sock.send(data, opcode) == 0:
            raise WebSocketConnectionClosedException(
                "Connection is already closed.")

    def close(self):
        """
        close websocket connection.
        """
        self.keep_running = False
        if self.sock:
            self.sock.close()

    def _send_ping(self, interval, event):
        # Runs on a daemon thread: send a ping every `interval` seconds
        # until `event` is set by run_forever's cleanup.
        while not event.wait(interval):
            self.last_ping_tm = time.time()
            if self.sock:
                self.sock.ping()

    def run_forever(self, sockopt=None, sslopt=None,
                    ping_interval=0, ping_timeout=None,
                    http_proxy_host=None, http_proxy_port=None,
                    http_no_proxy=None, http_proxy_auth=None,
                    skip_utf8_validation=False,
                    host=None, origin=None):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        sockopt: values for socket.setsockopt.
            sockopt must be tuple
            and each element is argument of sock.setsockopt.
        sslopt: ssl socket optional dict.
        ping_interval: automatically send "ping" command
            every specified period(second)
            if set to 0, not send automatically.
        ping_timeout: timeout(second) if the pong message is not received.
        http_proxy_host: http proxy host name.
        http_proxy_port: http proxy port. If not set, set to 80.
        http_no_proxy: host names, which doesn't use proxy.
        skip_utf8_validation: skip utf8 validation.
        host: update host header.
        origin: update origin header.
        """

        if not ping_timeout or ping_timeout <= 0:
            ping_timeout = None
        if ping_timeout and ping_interval and ping_interval <= ping_timeout:
            raise WebSocketException("Ensure ping_interval > ping_timeout")
        if sockopt is None:
            sockopt = []
        if sslopt is None:
            sslopt = {}
        if self.sock:
            raise WebSocketException("socket is already opened")
        thread = None
        close_frame = None

        try:
            self.sock = WebSocket(
                self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
                fire_cont_frame=self.on_cont_message and True or False,
                skip_utf8_validation=skip_utf8_validation)
            self.sock.settimeout(getdefaulttimeout())
            self.sock.connect(
                self.url, header=self.header, cookie=self.cookie,
                http_proxy_host=http_proxy_host,
                http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
                http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
                host=host, origin=origin)
            self._callback(self.on_open)

            if ping_interval:
                event = threading.Event()
                thread = threading.Thread(
                    target=self._send_ping, args=(ping_interval, event))
                # daemon attribute instead of setDaemon() (deprecated alias)
                thread.daemon = True
                thread.start()

            while self.sock.connected:
                r, w, e = select.select(
                    (self.sock.sock, ), (), (), ping_timeout)
                if not self.keep_running:
                    break

                if r:
                    op_code, frame = self.sock.recv_data_frame(True)
                    if op_code == ABNF.OPCODE_CLOSE:
                        close_frame = frame
                        break
                    elif op_code == ABNF.OPCODE_PING:
                        self._callback(self.on_ping, frame.data)
                    elif op_code == ABNF.OPCODE_PONG:
                        self.last_pong_tm = time.time()
                        self._callback(self.on_pong, frame.data)
                    elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
                        # Bug fix: `data` was referenced before assignment in
                        # this branch; the received payload is frame.data.
                        self._callback(self.on_data, frame.data,
                                       frame.opcode, frame.fin)
                        self._callback(self.on_cont_message,
                                       frame.data, frame.fin)
                    else:
                        data = frame.data
                        if six.PY3 and frame.opcode == ABNF.OPCODE_TEXT:
                            data = data.decode("utf-8")
                        self._callback(self.on_data, data, frame.opcode, True)
                        self._callback(self.on_message, data)

                if ping_timeout and self.last_ping_tm \
                        and time.time() - self.last_ping_tm > ping_timeout \
                        and self.last_ping_tm - self.last_pong_tm > ping_timeout:
                    raise WebSocketTimeoutException("ping/pong timed out")
        except (Exception, KeyboardInterrupt, SystemExit) as e:
            self._callback(self.on_error, e)
            if isinstance(e, SystemExit):
                # propagate SystemExit further
                raise
        finally:
            # is_alive() instead of isAlive() (removed in Python 3.9)
            if thread and thread.is_alive():
                event.set()
                thread.join()
            self.keep_running = False
            self.sock.close()
            close_args = self._get_close_args(
                close_frame.data if close_frame else None)
            self._callback(self.on_close, *close_args)
            self.sock = None

    def _get_close_args(self, data):
        """ this functions extracts the code, reason from the close body
        if they exists, and if the self.on_close except three arguments """
        import inspect
        # if the on_close callback is "old", just return empty list
        if sys.version_info < (3, 0):
            if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
                return []
        else:
            if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
                return []

        if data and len(data) >= 2:
            # First two bytes of the close body are the big-endian status code
            code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
            reason = data[2:].decode('utf-8')
            return [code, reason]

        return [None, None]

    def _callback(self, callback, *args):
        # Invoke a user callback defensively: log errors instead of letting
        # them kill the event loop.
        if callback:
            try:
                callback(self, *args)
            except Exception as e:
                error("error from callback {}: {}".format(callback, e))
                if isEnabledForDebug():
                    _, _, tb = sys.exc_info()
                    traceback.print_tb(tb)
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 10*3600
class PowerMonitoring:
  """Tracks device power usage and estimated car-battery charge.

  Integrates power drawn while offroad (``power_used_uWh``) and an estimate
  of the car battery's remaining capacity (``car_battery_capacity_uWh``),
  and decides when charging should be paused and when the device should
  shut down. ``integration_lock`` guards the integration state that is also
  touched from the pulsed-measurement background thread.
  """

  def __init__(self):
    self.params = Params()
    self.last_measurement_time = None           # Used for integration delta
    self.last_save_time = 0   # Used for saving current value in a param
    self.power_used_uWh = 0   # Integrated power usage in uWh since going into offroad
    self.next_pulsed_measurement_time = None
    self.car_voltage_mV = 12e3  # Low-passed version of pandaState voltage
    self.car_voltage_instant_mV = 12e3  # Last value of pandaState voltage
    self.integration_lock = threading.Lock()

    # Restore the persisted capacity estimate; missing param means unknown.
    car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
    if car_battery_capacity_uWh is None:
      car_battery_capacity_uWh = 0

    # Reset capacity if it's low
    self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

  # Calculation tick
  def calculate(self, pandaState):
    """One monitoring tick: update voltage filters and integrate power.

    Called periodically with the latest pandaState message (or None when
    no panda is connected). All failures are swallowed and logged so the
    caller's loop keeps running.
    """
    try:
      now = sec_since_boot()

      # If pandaState is None, we're probably not in a car, so we don't care
      if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
        with self.integration_lock:
          self.last_measurement_time = None
          self.next_pulsed_measurement_time = None
          self.power_used_uWh = 0
        return

      # Low-pass battery voltage
      # (comparisons against VBATT_* * 1e3 below imply voltage is in mV)
      self.car_voltage_instant_mV = pandaState.pandaState.voltage
      self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))

      # Cap the car battery power and save it in a param every 10-ish seconds
      self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
      self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
      if now - self.last_save_time >= 10:
        put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
        self.last_save_time = now

      # First measurement, set integration time
      with self.integration_lock:
        if self.last_measurement_time is None:
          self.last_measurement_time = now
          return

      if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
        # If there is ignition, we integrate the charging rate of the car
        with self.integration_lock:
          self.power_used_uWh = 0
          integration_time_h = (now - self.last_measurement_time) / 3600
          if integration_time_h < 0:
            raise ValueError(f"Negative integration time: {integration_time_h}h")
          self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
          self.last_measurement_time = now
      else:
        # No ignition, we integrate the offroad power used by the device
        is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
        # Get current power draw somehow
        current_power = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
        if current_power is not None:
          pass
        elif HARDWARE.get_battery_status() == 'Discharging':
          # If the battery is discharging, we can use this measurement
          # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
          # NOTE(review): /1e6 on both factors suggests battery voltage and
          # current are reported in uV/uA -- confirm against HARDWARE impl.
          current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
        elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
          # TODO: Figure out why this is off by a factor of 3/4???
          FUDGE_FACTOR = 1.33

          # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
          def perform_pulse_measurement(now):
            try:
              HARDWARE.set_battery_charging(False)
              time.sleep(5)

              # Measure for a few sec to get a good average
              voltages = []
              currents = []
              for _ in range(6):
                voltages.append(HARDWARE.get_battery_voltage())
                currents.append(HARDWARE.get_battery_current())
                time.sleep(1)
              current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))

              self._perform_integration(now, current_power * FUDGE_FACTOR)

              # Enable charging again
              HARDWARE.set_battery_charging(True)
            except Exception:
              cloudlog.exception("Pulsed power measurement failed")

          # Start pulsed measurement and return
          threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
          self.next_pulsed_measurement_time = None
          return

        elif self.next_pulsed_measurement_time is None and not is_uno:
          # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
          # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
          # We shouldn't do this very often, so make sure it has been some long-ish random time interval
          self.next_pulsed_measurement_time = now + random.randint(120, 180)
          return
        else:
          # Do nothing
          return

        # Do the integration
        self._perform_integration(now, current_power)
    except Exception:
      cloudlog.exception("Power monitoring calculation failed")

  def _perform_integration(self, t, current_power):
    """Integrate `current_power` (W) since the last measurement at time `t` (s).

    Updates `power_used_uWh` and drains `car_battery_capacity_uWh`
    accordingly; thread-safe via `integration_lock`.
    """
    with self.integration_lock:
      try:
        if self.last_measurement_time:
          integration_time_h = (t - self.last_measurement_time) / 3600
          power_used = (current_power * 1000000) * integration_time_h
          if power_used < 0:
            # NOTE(review): message labels `power_used` (energy, uWh) as
            # "Current Power" -- looks mislabeled; confirm and fix upstream.
            raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
          self.power_used_uWh += power_used
          self.car_battery_capacity_uWh -= power_used
          self.last_measurement_time = t
      except Exception:
        cloudlog.exception("Integration failed")

  # Get the power usage
  def get_power_used(self):
    """Return integrated offroad energy use in uWh (int)."""
    return int(self.power_used_uWh)

  def get_car_battery_capacity(self):
    """Return the current car-battery capacity estimate in uWh (int)."""
    return int(self.car_battery_capacity_uWh)

  # See if we need to disable charging
  def should_disable_charging(self, pandaState, offroad_timestamp):
    """Decide whether device charging should be paused.

    True when offroad too long, the car battery is low (both low-passed
    and instantaneous readings agree it is a real sag, not a power loss),
    or capacity is exhausted -- unless ignition is on or the user disabled
    power-down; `ForcePowerDown` overrides everything.
    """
    if pandaState is None or offroad_timestamp is None:
      return False

    now = sec_since_boot()
    disable_charging = False
    disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
    disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
    disable_charging |= (self.car_battery_capacity_uWh <= 0)
    disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
    disable_charging &= (not self.params.get_bool("DisablePowerDown"))
    disable_charging |= self.params.get_bool("ForcePowerDown")
    return disable_charging

  # See if we need to shutdown
  def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
    """Decide whether the device should power off entirely.

    Only after a drive has been seen (`started_seen`), and either charging
    has already been disabled or the device battery is nearly empty while
    discharging for over a minute offroad.
    """
    if pandaState is None or offroad_timestamp is None:
      return False

    now = sec_since_boot()
    panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
    BATT_PERC_OFF = 10

    should_shutdown = False
    # Wait until we have shut down charging before powering down
    should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
    should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
    should_shutdown &= started_seen
    return should_shutdown
|
pipeline_ops_test.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.pipeline_ops."""
import copy
import os
import threading
import time
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import test_case_utils as tu
from ml_metadata.proto import metadata_store_pb2
def _test_pipeline(pipeline_id,
                   execution_mode: pipeline_pb2.Pipeline.ExecutionMode = (
                       pipeline_pb2.Pipeline.ASYNC)):
  """Builds a minimal Pipeline proto with the given id and execution mode.

  Sync pipelines additionally get a fixed pipeline run id ('run0') since
  the orchestrator requires one for SYNC mode.
  """
  result = pipeline_pb2.Pipeline()
  result.pipeline_info.id = pipeline_id
  result.execution_mode = execution_mode
  is_sync = execution_mode == pipeline_pb2.Pipeline.SYNC
  if is_sync:
    result.runtime_spec.pipeline_run_id.field_value.string_value = 'run0'
  return result
class PipelineOpsTest(tu.TfxTest, parameterized.TestCase):
  """Tests for pipeline_ops: start/stop of pipelines and nodes, orchestration.

  Each test connects to a per-test sqlite-backed MLMD instance created in
  setUp, so tests are hermetic and multiple connections within one test see
  the same store.
  """

  def setUp(self):
    super(PipelineOpsTest, self).setUp()
    pipeline_root = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self.id())

    # Makes sure multiple connections within a test always connect to the same
    # MLMD instance.
    metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
    self._metadata_path = metadata_path
    connection_config = metadata.sqlite_metadata_connection_config(
        metadata_path)
    connection_config.sqlite.SetInParent()
    self._mlmd_connection = metadata.Metadata(
        connection_config=connection_config)

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_initiate_pipeline_start(self, pipeline):
    """Starting a pipeline creates a NEW execution; duplicates are rejected."""
    with self._mlmd_connection as m:
      # Initiate a pipeline start.
      pipeline_state1 = pipeline_ops.initiate_pipeline_start(m, pipeline)
      self.assertProtoPartiallyEquals(
          pipeline, pipeline_state1.pipeline, ignored_fields=['runtime_spec'])
      self.assertEqual(metadata_store_pb2.Execution.NEW,
                       pipeline_state1.execution.last_known_state)

      # Initiate another pipeline start.
      pipeline2 = _test_pipeline('pipeline2')
      pipeline_state2 = pipeline_ops.initiate_pipeline_start(m, pipeline2)
      self.assertEqual(pipeline2, pipeline_state2.pipeline)
      self.assertEqual(metadata_store_pb2.Execution.NEW,
                       pipeline_state2.execution.last_known_state)

      # Error if attempted to initiate when old one is active.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.initiate_pipeline_start(m, pipeline)
      self.assertEqual(status_lib.Code.ALREADY_EXISTS,
                       exception_context.exception.code)

      # Fine to initiate after the previous one is inactive.
      execution = pipeline_state1.execution
      execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
      m.store.put_executions([execution])
      pipeline_state3 = pipeline_ops.initiate_pipeline_start(m, pipeline)
      self.assertEqual(metadata_store_pb2.Execution.NEW,
                       pipeline_state3.execution.last_known_state)

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_non_existent_or_inactive(self, pipeline):
    """Stopping a missing or already-inactive pipeline raises NOT_FOUND."""
    with self._mlmd_connection as m:
      # Stop pipeline without creating one.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.stop_pipeline(m,
                                   task_lib.PipelineUid.from_pipeline(pipeline))
      self.assertEqual(status_lib.Code.NOT_FOUND,
                       exception_context.exception.code)

      # Initiate pipeline start and mark it completed.
      execution = pipeline_ops.initiate_pipeline_start(m, pipeline).execution
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        pipeline_state.initiate_stop(status_lib.Status(code=status_lib.Code.OK))
      execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
      m.store.put_executions([execution])

      # Try to initiate stop again.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.stop_pipeline(m, pipeline_uid)
      self.assertEqual(status_lib.Code.NOT_FOUND,
                       exception_context.exception.code)

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_wait_for_inactivation(self, pipeline):
    """stop_pipeline blocks until the execution is marked inactive."""
    with self._mlmd_connection as m:
      execution = pipeline_ops.initiate_pipeline_start(m, pipeline).execution

      # Background thread completes the execution after a delay, simulating
      # the orchestrator finalizing the pipeline while stop_pipeline waits.
      def _inactivate(execution):
        time.sleep(2.0)
        with pipeline_ops._PIPELINE_OPS_LOCK:
          execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
          m.store.put_executions([execution])

      thread = threading.Thread(
          target=_inactivate, args=(copy.deepcopy(execution),))
      thread.start()

      pipeline_ops.stop_pipeline(
          m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=10.0)

      thread.join()

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_wait_for_inactivation_timeout(self, pipeline):
    """stop_pipeline raises DEADLINE_EXCEEDED if inactivation never happens."""
    with self._mlmd_connection as m:
      pipeline_ops.initiate_pipeline_start(m, pipeline)

      with self.assertRaisesRegex(
          status_lib.StatusNotOkError,
          'Timed out.*waiting for execution inactivation.'
      ) as exception_context:
        pipeline_ops.stop_pipeline(
            m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=1.0)
      self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
                       exception_context.exception.code)

  def test_stop_node_no_active_executions(self):
    """Stopping an inactive node still records stop-initiation."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline).commit()
      pipeline_ops.stop_node(m, node_uid)
      pipeline_state = pstate.PipelineState.load(m, pipeline_uid)

      # The node should be stop-initiated even when node is inactive to prevent
      # future triggers.
      self.assertEqual(status_lib.Code.CANCELLED,
                       pipeline_state.node_stop_initiated_reason(node_uid).code)

      # Restart node.
      pipeline_state = pipeline_ops.initiate_node_start(m, node_uid)
      self.assertIsNone(pipeline_state.node_stop_initiated_reason(node_uid))

  def test_stop_node_wait_for_inactivation(self):
    """stop_node blocks until the node's active execution becomes inactive."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    trainer = pipeline.nodes[2].pipeline_node
    test_utils.fake_component_output(
        self._mlmd_connection, trainer, active=True)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline).commit()

      # Background thread completes the trainer execution while stop_node
      # is waiting for inactivation.
      def _inactivate(execution):
        time.sleep(2.0)
        with pipeline_ops._PIPELINE_OPS_LOCK:
          execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
          m.store.put_executions([execution])

      execution = task_gen_utils.get_executions(m, trainer)[0]
      thread = threading.Thread(
          target=_inactivate, args=(copy.deepcopy(execution),))
      thread.start()

      pipeline_ops.stop_node(m, node_uid, timeout_secs=5.0)
      thread.join()

      pipeline_state = pstate.PipelineState.load(m, pipeline_uid)
      self.assertEqual(status_lib.Code.CANCELLED,
                       pipeline_state.node_stop_initiated_reason(node_uid).code)

      # Restart node.
      pipeline_state = pipeline_ops.initiate_node_start(m, node_uid)
      self.assertIsNone(pipeline_state.node_stop_initiated_reason(node_uid))

  def test_stop_node_wait_for_inactivation_timeout(self):
    """stop_node times out but still records stop-initiation for the node."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    trainer = pipeline.nodes[2].pipeline_node
    test_utils.fake_component_output(
        self._mlmd_connection, trainer, active=True)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline).commit()
      with self.assertRaisesRegex(
          status_lib.StatusNotOkError,
          'Timed out.*waiting for execution inactivation.'
      ) as exception_context:
        pipeline_ops.stop_node(m, node_uid, timeout_secs=1.0)
      self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
                       exception_context.exception.code)

      # Even if `wait_for_inactivation` times out, the node should be stop
      # initiated to prevent future triggers.
      pipeline_state = pstate.PipelineState.load(m, pipeline_uid)
      self.assertEqual(status_lib.Code.CANCELLED,
                       pipeline_state.node_stop_initiated_reason(node_uid).code)

  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_orchestrate_active_pipelines(self, mock_async_task_gen,
                                        mock_sync_task_gen):
    """orchestrate() generates tasks for all active sync and async pipelines."""
    with self._mlmd_connection as m:
      # Sync and async active pipelines.
      async_pipelines = [
          _test_pipeline('pipeline1'),
          _test_pipeline('pipeline2'),
      ]
      sync_pipelines = [
          _test_pipeline('pipeline3', pipeline_pb2.Pipeline.SYNC),
          _test_pipeline('pipeline4', pipeline_pb2.Pipeline.SYNC),
      ]

      for pipeline in async_pipelines + sync_pipelines:
        pipeline_ops.initiate_pipeline_start(m, pipeline)

      # Active executions for active async pipelines.
      mock_async_task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          async_pipelines[0]),
                      node_id='Transform'))
          ],
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          async_pipelines[1]),
                      node_id='Trainer'))
          ],
      ]

      # Active executions for active sync pipelines.
      mock_sync_task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          sync_pipelines[0]),
                      node_id='Trainer'))
          ],
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          sync_pipelines[1]),
                      node_id='Validator'))
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())

      self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count)
      self.assertEqual(2, mock_sync_task_gen.return_value.generate.call_count)

      # Verify that tasks are enqueued in the expected order.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline2', 'Trainer'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline3', 'Trainer'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline4', 'Validator'), task.node_uid)
      self.assertTrue(task_queue.is_empty())

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  @mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
  def test_stop_initiated_pipelines(self, pipeline, mock_gen_task_from_active,
                                    mock_async_task_gen, mock_sync_task_gen):
    """orchestrate() cancels nodes and service jobs of stop-initiated pipelines."""
    with self._mlmd_connection as m:
      pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.side_effect = (
          lambda _, node_id: node_id == 'ExampleGen')
      mock_service_job_manager.is_mixed_service_node.side_effect = (
          lambda _, node_id: node_id == 'Transform')

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        pipeline_state.initiate_stop(
            status_lib.Status(code=status_lib.Code.CANCELLED))
        pipeline_execution = pipeline_state.execution

      task_queue = tq.TaskQueue()

      # For the stop-initiated pipeline, "Transform" execution task is in queue,
      # "Trainer" has an active execution in MLMD but no task in queue,
      # "Evaluator" has no active execution.
      task_queue.enqueue(
          test_utils.create_exec_node_task(
              task_lib.NodeUid(
                  pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                  node_id='Transform')))
      transform_task = task_queue.dequeue()  # simulates task being processed
      mock_gen_task_from_active.side_effect = [
          test_utils.create_exec_node_task(
              node_uid=task_lib.NodeUid(
                  pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                  node_id='Trainer'),
              is_cancelled=True), None, None, None, None
      ]

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      # There are no active pipelines so these shouldn't be called.
      mock_async_task_gen.assert_not_called()
      mock_sync_task_gen.assert_not_called()

      # stop_node_services should be called for ExampleGen which is a pure
      # service node.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'ExampleGen')
      mock_service_job_manager.reset_mock()

      task_queue.task_done(transform_task)  # Pop out transform task.

      # CancelNodeTask for the "Transform" ExecNodeTask should be next.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual('Transform', task.node_uid.node_id)

      # ExecNodeTask (with is_cancelled=True) for "Trainer" is next.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual('Trainer', task.node_uid.node_id)
      self.assertTrue(task.is_cancelled)

      self.assertTrue(task_queue.is_empty())

      mock_gen_task_from_active.assert_has_calls([
          mock.call(
              m,
              pipeline_state.pipeline,
              pipeline.nodes[2].pipeline_node,
              mock.ANY,
              is_cancelled=True),
          mock.call(
              m,
              pipeline_state.pipeline,
              pipeline.nodes[3].pipeline_node,
              mock.ANY,
              is_cancelled=True)
      ])
      self.assertEqual(2, mock_gen_task_from_active.call_count)

      # Pipeline execution should continue to be active since active node
      # executions were found in the last call to `orchestrate`.
      [execution] = m.store.get_executions_by_id([pipeline_execution.id])
      self.assertTrue(execution_lib.is_execution_active(execution))

      # Call `orchestrate` again; this time there are no more active node
      # executions so the pipeline should be marked as cancelled.
      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
      self.assertTrue(task_queue.is_empty())
      [execution] = m.store.get_executions_by_id([pipeline_execution.id])
      self.assertEqual(metadata_store_pb2.Execution.CANCELED,
                       execution.last_known_state)

      # stop_node_services should be called on both ExampleGen and Transform
      # which are service nodes.
      mock_service_job_manager.stop_node_services.assert_has_calls(
          [mock.call(mock.ANY, 'ExampleGen'),
           mock.call(mock.ANY, 'Transform')],
          any_order=True)

  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  @mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
  def test_active_pipelines_with_stop_initiated_nodes(self,
                                                      mock_gen_task_from_active,
                                                      mock_async_task_gen):
    """orchestrate() cancels stop-initiated nodes of an active pipeline."""
    with self._mlmd_connection as m:
      pipeline = _test_pipeline('pipeline')
      pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.side_effect = (
          lambda _, node_id: node_id == 'ExampleGen')

      example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[0].pipeline_node)

      transform_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[1].pipeline_node)
      transform_task = test_utils.create_exec_node_task(
          node_uid=transform_node_uid)

      trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[2].pipeline_node)
      trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)

      evaluator_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[3].pipeline_node)
      evaluator_task = test_utils.create_exec_node_task(
          node_uid=evaluator_node_uid)
      cancelled_evaluator_task = test_utils.create_exec_node_task(
          node_uid=evaluator_node_uid, is_cancelled=True)

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        # Stop example-gen, trainer and evaluator.
        pipeline_state.initiate_node_stop(
            example_gen_node_uid,
            status_lib.Status(code=status_lib.Code.CANCELLED))
        pipeline_state.initiate_node_stop(
            trainer_node_uid, status_lib.Status(code=status_lib.Code.CANCELLED))
        pipeline_state.initiate_node_stop(
            evaluator_node_uid, status_lib.Status(code=status_lib.Code.ABORTED))

      task_queue = tq.TaskQueue()

      # Simulate a new transform execution being triggered.
      mock_async_task_gen.return_value.generate.return_value = [transform_task]
      # Simulate ExecNodeTask for trainer already present in the task queue.
      task_queue.enqueue(trainer_task)
      # Simulate Evaluator having an active execution in MLMD.
      mock_gen_task_from_active.side_effect = [evaluator_task]

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
      self.assertEqual(1, mock_async_task_gen.return_value.generate.call_count)

      # stop_node_services should be called on example-gen which is a pure
      # service node.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'ExampleGen')

      # Verify that tasks are enqueued in the expected order:

      # Pre-existing trainer task.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(trainer_task, task)

      # CancelNodeTask for trainer.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual(trainer_node_uid, task.node_uid)

      # ExecNodeTask with is_cancelled=True for evaluator.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(cancelled_evaluator_task, task)

      # ExecNodeTask for newly triggered transform node.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(transform_task, task)

      # No more tasks.
      self.assertTrue(task_queue.is_empty())

  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  def test_handling_finalize_pipeline_task(self, task_gen):
    """A FinalizePipelineTask from the generator stop-initiates the pipeline."""
    with self._mlmd_connection as m:
      pipeline = _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      finalize_reason = status_lib.Status(
          code=status_lib.Code.ABORTED, message='foo bar')
      task_gen.return_value.generate.side_effect = [
          [
              task_lib.FinalizePipelineTask(
                  pipeline_uid=pipeline_uid, status=finalize_reason)
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())
      task_gen.return_value.generate.assert_called_once()
      self.assertTrue(task_queue.is_empty())

      # Load pipeline state and verify stop initiation.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        self.assertEqual(finalize_reason,
                         pipeline_state.stop_initiated_reason())

  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_handling_finalize_node_task(self, task_gen):
    """A FinalizeNodeTask from the generator stop-initiates only that node."""
    with self._mlmd_connection as m:
      pipeline = _test_pipeline('pipeline1')
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      finalize_reason = status_lib.Status(
          code=status_lib.Code.ABORTED, message='foo bar')
      task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=pipeline_uid, node_id='Transform')),
              task_lib.FinalizeNodeTask(
                  node_uid=task_lib.NodeUid(
                      pipeline_uid=pipeline_uid, node_id='Trainer'),
                  status=finalize_reason)
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())
      task_gen.return_value.generate.assert_called_once()
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)

      # Load pipeline state and verify node stop initiation.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        self.assertEqual(
            finalize_reason,
            pipeline_state.node_stop_initiated_reason(
                task_lib.NodeUid(pipeline_uid=pipeline_uid, node_id='Trainer')))

  def test_save_and_remove_pipeline_property(self):
    """Custom properties can be saved to and removed from pipeline state."""
    with self._mlmd_connection as m:
      pipeline1 = _test_pipeline('pipeline1')
      pipeline_state1 = pipeline_ops.initiate_pipeline_start(m, pipeline1)
      property_key = 'test_key'
      property_value = 'bala'
      self.assertIsNone(
          pipeline_state1.execution.custom_properties.get(property_key))
      pipeline_ops.save_pipeline_property(pipeline_state1.mlmd_handle,
                                          pipeline_state1.pipeline_uid,
                                          property_key, property_value)

      with pstate.PipelineState.load(
          m, pipeline_state1.pipeline_uid) as pipeline_state2:
        self.assertIsNotNone(
            pipeline_state2.execution.custom_properties.get(property_key))
        self.assertEqual(
            pipeline_state2.execution.custom_properties[property_key]
            .string_value, property_value)

      pipeline_ops.remove_pipeline_property(pipeline_state2.mlmd_handle,
                                            pipeline_state2.pipeline_uid,
                                            property_key)
      with pstate.PipelineState.load(
          m, pipeline_state2.pipeline_uid) as pipeline_state3:
        self.assertIsNone(
            pipeline_state3.execution.custom_properties.get(property_key))

  def test_to_status_not_ok_error_decorator(self):
    """_to_status_not_ok_error wraps exceptions, preserving existing codes."""

    @pipeline_ops._to_status_not_ok_error
    def fn1():
      raise RuntimeError('test error 1')

    @pipeline_ops._to_status_not_ok_error
    def fn2():
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.ALREADY_EXISTS, message='test error 2')

    with self.assertRaisesRegex(status_lib.StatusNotOkError,
                                'test error 1') as ctxt:
      fn1()
    self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code)

    with self.assertRaisesRegex(status_lib.StatusNotOkError,
                                'test error 2') as ctxt:
      fn2()
    self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code)
if __name__ == '__main__':
tf.test.main()
|
2.py | import sys
import random
import json
import concurrent.futures
import urllib.request
import uvicorn
from http.server import HTTPServer, ThreadingHTTPServer, BaseHTTPRequestHandler
from threading import Thread
from io import BytesIO
async def read_body(receive):
    """Collect the complete request body from an ASGI receive channel."""
    chunks = []
    while True:
        message = await receive()
        chunks.append(message.get('body', b''))
        if not message.get('more_body', False):
            break
    return b''.join(chunks)


async def app(scope, receive, send):
    """Minimal ASGI app: echo back the 'value' field of a JSON request body."""
    assert scope['type'] == 'http'
    payload = json.loads(await read_body(receive))
    start_event = {
        'type': 'http.response.start',
        'status': 200,
        'headers': [],
    }
    body_event = {
        'type': 'http.response.body',
        'body': str(payload['value']).encode('utf-8'),
    }
    await send(start_event)
    await send(body_event)
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Thread-per-request handler mirroring the ASGI `app`: echoes the
    'value' field of a JSON POST body as the response body."""

    def do_POST(self):
        # read1(-1) returns only what is currently buffered, ignoring
        # Content-Length. NOTE(review): assumes the whole JSON body arrives
        # in a single chunk -- confirm for large payloads.
        body = self.rfile.read1(-1)
        obj = json.loads(body)
        self.send_response(200)
        self.end_headers()
        response = BytesIO()
        response.write(str(obj['value']).encode('utf-8'))
        self.wfile.write(response.getvalue())
def run_server(port: int):
    """Serve the ASGI `app` on localhost:`port` (blocks until shutdown).

    NOTE(review): uvicorn is given the import string "app:app", which
    requires this module to be importable under the name `app` -- confirm
    the module/file name matches, otherwise the server fails to start.
    """
    uvicorn.run("app:app", host="localhost", port=port, log_level="critical")
def send(api: str, value: int):
    """POST {'value': value} as JSON to *api* and return the echoed integer.

    Retries forever on connection failures — the server thread may still be
    starting up when the first requests go out.
    """
    payload = json.dumps({'value': value}).encode('utf-8')
    while True:
        try:
            with urllib.request.urlopen(api, payload) as fp:
                return int(fp.read().decode('utf-8'))
        except OSError:
            # urllib.error.URLError and ConnectionError are both OSError
            # subclasses.  The previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the loop unkillable.
            pass
def main():
    """Start the server thread, fan out n concurrent requests, print the sum.

    n defaults to 10 and may be overridden by the first CLI argument.
    """
    n = 10 if len(sys.argv) < 2 else int(sys.argv[1])
    port = 20000 + int(30000 * random.random())
    server_thread = Thread(target=run_server, args=(port,), daemon=True)
    server_thread.start()
    api = f'http://localhost:{port}'
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(send, api, i) for i in range(1, n + 1)]
        # The accumulator was previously named `sum`, shadowing the builtin.
        total = sum(future.result() for future in futures)
    print(total)
# Script entry point.
if __name__ == '__main__':
    main()
# uvicorn.run("app:app", host="localhost", port=5000, log_level="info")
|
tests.py | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import datetime
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import (backend, connection, connections, DEFAULT_DB_ALIAS,
IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.models import fields, Sum, Avg, Variance, StdDev
from django.db.utils import ConnectionHandler, DatabaseError, load_backend
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings, str_prefix
from django.utils import six
from django.utils.six.moves import xrange
from django.utils import unittest
from . import models
class DummyBackendTest(TestCase):

    def test_no_databases(self):
        """
        Test that empty DATABASES setting default to the dummy backend.
        """
        handler = ConnectionHandler({})
        engine = handler[DEFAULT_DB_ALIAS].settings_dict['ENGINE']
        self.assertEqual(engine, 'django.db.backends.dummy')
class OracleChecks(unittest.TestCase):
    # Every test here is skipped unless the active connection is Oracle.

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle quote_name semantics")
    def test_quote_name(self):
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        # Applying %-interpolation with no args must leave the name intact.
        self.assertEqual(quoted_name % (), name)

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        convert_unicode = backend.convert_unicode
        cursor = connection.cursor()
        cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                        [convert_unicode('_django_testing!'), ])

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        cursor = connection.cursor()
        var = cursor.var(backend.Database.STRING)
        cursor.execute("BEGIN %s := 'X'; END; ", [var])
        self.assertEqual(var.getvalue(), 'X')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        c = connection.cursor()
        c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
        long_str = ''.join([six.text_type(x) for x in xrange(4000)])
        c.execute('INSERT INTO ltext VALUES (%s)', [long_str])
        c.execute('SELECT text FROM ltext')
        row = c.fetchone()
        # NCLOB columns come back as LOB handles; read() yields the text.
        self.assertEqual(long_str, row[0].read())
        c.execute('DROP TABLE ltext')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle connection semantics")
    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.cursor()  # Ensure the connection is initialized.
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle connection semantics")
    def test_order_of_nls_parameters(self):
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        c = connection.cursor()
        query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
        # Test that the query succeeds without errors - pre #18465 this
        # wasn't the case.
        c.execute(query)
        self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):

    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_autoincrement(self):
        """
        Check that auto_increment fields are reset correctly by sql_flush().
        Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
        Refs #16961.
        """
        statements = connection.ops.sql_flush(no_style(),
                                              tables=['test'],
                                              sequences=[{
                                                  'table': 'test',
                                                  'col': 'somecol',
                                              }])
        found_reset = False
        for sql in statements:
            found_reset = found_reset or 'ALTER TABLE' in sql
        # Pre-5.0.13 servers need an explicit ALTER TABLE to reset the
        # counter; newer ones reset it as part of TRUNCATE itself.
        if connection.mysql_version < (5, 0, 13):
            self.assertTrue(found_reset)
        else:
            self.assertFalse(found_reset)

    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_server_version_connections(self):
        # Accessing mysql_version must not leave a raw connection open.
        connection.close()
        connection.mysql_version
        self.assertTrue(connection.connection is None)
class DateQuotingTest(TestCase):

    def test_django_date_trunc(self):
        """
        Test the custom ``django_date_trunc method``, in particular against
        fields which clash with strings passed to it (e.g. 'year') - see
        #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        # `year` is both a model field name and a valid truncation kind.
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        years = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(years), [datetime.date(2010, 1, 1)])

    def test_django_date_extract(self):
        """
        Test the custom ``django_date_extract method``, in particular against fields
        which clash with strings passed to it (e.g. 'day') - see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        classes = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    # DEBUG=True so executed SQL is recorded on connection.queries.

    def test_debug_sql(self):
        list(models.Tag.objects.filter(name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Tag._meta.db_table, sql)

    def test_query_encoding(self):
        """
        Test that last_executed_query() returns an Unicode string
        """
        # Non-ASCII select alias exercises the encoding path.
        tags = models.Tag.objects.extra(select={'föö': 1})
        sql, params = tags.query.sql_with_params()
        cursor = tags.query.get_compiler('default').execute_sql(None)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertTrue(isinstance(last_sql, six.text_type))

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        self.assertEqual(connection.queries[-1]['sql'],
            str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):

    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        cursor = connection.cursor()
        query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
            connection.introspection.table_name_converter('backends_square'),
            connection.ops.quote_name('root'),
            connection.ops.quote_name('square')
        ))
        # Two placeholders, but three values then one value per row.
        self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3), ])
        self.assertRaises(Exception, cursor.executemany, query, [(1,), ])
# Unfortunately, the following tests would be good to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be
# run everywhere (although they would be an effective test of #13711).
class LongNameTest(TestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_m2m(self):
        """Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
        obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_flush(self):
        """Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually

        # Some convenience aliases
        VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):

    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Create an object with a manually specified PK
        models.Post.objects.create(id=10, name='1st post', text='hello world')

        # Reset the sequences for the database
        cursor = connection.cursor()
        commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
        for sql in commands:
            cursor.execute(sql)

        # If we create a new object now, it should have a PK greater
        # than the PK we specified manually.
        obj = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):

    def assert_parses(self, version_string, version):
        """Assert _parse_version maps *version_string* to *version*."""
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        cases = [
            ("PostgreSQL 8.3 beta4", 80300),
            ("PostgreSQL 8.3", 80300),
            ("EnterpriseDB 8.3", 80300),
            ("PostgreSQL 8.3.6", 80306),
            ("PostgreSQL 8.4beta1", 80400),
            ("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301),
        ]
        for raw, expected in cases:
            self.assert_parses(raw, expected)

    def test_version_detection(self):
        """Test PostgreSQL version detection"""

        # Helper mocks
        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass

            def fetchone(self):
                return ["PostgreSQL 8.3"]

        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()

        # psycopg2 < 2.0.12 code path
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
    """
    #17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
    transaction is rolled back.
    """

    @unittest.skipUnless(
        connection.vendor == 'postgresql' and connection.isolation_level > 0,
        "This test applies only to PostgreSQL without autocommit")
    def test_connect_and_rollback(self):
        new_connections = ConnectionHandler(settings.DATABASES)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Ensure the database default time zone is different than
            # the time zone in new_connection.settings_dict. We can
            # get the default time zone by reset & show.
            cursor = new_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            new_connection.close()

            # Fetch a new connection with the new_tz as default
            # time zone, run a query and rollback.
            new_connection.settings_dict['TIME_ZONE'] = new_tz
            new_connection.enter_transaction_management()
            cursor = new_connection.cursor()
            new_connection.rollback()

            # Now let's see if the rollback rolled back the SET TIME ZONE.
            cursor.execute("SHOW TIMEZONE")
            tz = cursor.fetchone()[0]
            self.assertEqual(new_tz, tz)
        finally:
            try:
                new_connection.close()
            except DatabaseError:
                pass
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        data = {}

        def receiver(sender, connection, **kwargs):
            data["connection"] = connection

        connection_created.connect(receiver)
        connection.close()
        cursor = connection.cursor()
        # Reconnecting fired the signal with the new raw connection.
        self.assertTrue(data["connection"].connection is connection.connection)

        connection_created.disconnect(receiver)
        data.clear()
        cursor = connection.cursor()
        # With the receiver disconnected no signal data is recorded.
        self.assertTrue(data == {})
class EscapingChecks(TestCase):

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is a sqlite-specific issue")
    def test_parameter_escaping(self):
        # #13648: '%s' escaping support for sqlite3
        cursor = connection.cursor()
        rows = cursor.execute(
            "select strftime('%%s', date('now'))").fetchall()
        value = rows[0][0]
        self.assertNotEqual(value, None)
        # strftime('%s') yields a unix timestamp: a non-zero integer.
        self.assertTrue(int(value))
class SqlliteAggregationTests(TestCase):
    """
    #19360: Raise NotImplementedError when aggregating on date/time fields.
    """

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "No need to check SQLite aggregation semantics")
    def test_aggregation(self):
        for aggregate in (Sum, Avg, Variance, StdDev):
            for field_name in ('time', 'date', 'last_modified'):
                self.assertRaises(NotImplementedError,
                    models.Item.objects.all().aggregate,
                    aggregate(field_name))
class BackendTestCase(TestCase):

    def create_squares_with_executemany(self, args):
        # Build a raw parameterized INSERT for the Square table and feed the
        # given parameter sequence through cursor.executemany().
        cursor = connection.cursor()
        opts = models.Square._meta
        tbl = connection.introspection.table_name_converter(opts.db_table)
        f1 = connection.ops.quote_name(opts.get_field('root').column)
        f2 = connection.ops.quote_name(opts.get_field('square').column)
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        cursor.executemany(query, args)

    def test_cursor_executemany(self):
        # #4896: Test cursor.executemany
        args = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    def test_cursor_executemany_with_empty_params_list(self):
        # #4765: executemany with params=[] does nothing
        args = []
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 0)

    def test_cursor_executemany_with_iterator(self):
        # #10320: executemany accepts iterators
        args = iter((i, i ** 2) for i in range(-3, 2))
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 5)

        args = iter((i, i ** 2) for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 9)

    def test_unicode_fetches(self):
        # #6254: fetchone, fetchmany, fetchall return strings as unicode objects
        qn = connection.ops.quote_name
        models.Person(first_name="John", last_name="Doe").save()
        models.Person(first_name="Jane", last_name="Doe").save()
        models.Person(first_name="Mary", last_name="Agnelline").save()
        models.Person(first_name="Peter", last_name="Parker").save()
        models.Person(first_name="Clark", last_name="Kent").save()
        opts2 = models.Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        # Ordered by first name, so fetch results are deterministic below.
        query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
                  % (qn(f3.column), qn(f4.column),
                     connection.introspection.table_name_converter(opts2.db_table),
                     qn(f3.column)))
        cursor = connection.cursor()
        cursor.execute(query2)
        self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
        self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])

    def test_unicode_password(self):
        # Connecting with a non-ASCII password may legitimately fail with a
        # DatabaseError, but must not raise anything else.
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            cursor = connection.cursor()
        except backend.Database.DatabaseError:
            # As password is probably wrong, a database exception is expected
            pass
        except Exception as e:
            self.fail("Unexpected error raised with unicode password: %s" % e)
        finally:
            connection.settings_dict['PASSWORD'] = old_password

    def test_database_operations_helper_class(self):
        # Ticket #13630
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)

    def test_cached_db_features(self):
        # Cached feature flags must resolve to actual booleans.
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.supports_stddev, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))

    def test_duplicate_table_error(self):
        """ Test that creating an existing table returns a DatabaseError """
        cursor = connection.cursor()
        query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
        with self.assertRaises(DatabaseError):
            cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB and MySQL+MyISAM (something we currently can't do).
# * whether sqlite3 (if/once we get #14204 fixed) has referential integrity
#   turned on or not, something that would be controlled by runtime support
#   and user preference.
# When an integrity error is raised, we verify that its type is
# django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):

    def setUp(self):
        # Create a Reporter.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        # reporter_id=30 points at no existing Reporter row.
        a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a.save()
        except IntegrityError:
            return
        # No exception means the backend doesn't enforce FK constraints.
        self.skipTest("This backend does not support integrity checks.")

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a = models.Article.objects.get(headline="Test article")
        a.reporter_id = 30
        try:
            a.save()
        except IntegrityError:
            return
        self.skipTest("This backend does not support integrity checks.")

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
        """
        with transaction.commit_manually():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            finally:
                # Never commit the bad row.
                transaction.rollback()

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
        """
        with transaction.commit_manually():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            finally:
                transaction.rollback()

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.commit_manually():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
                # Explicit check must detect the dangling FK written above.
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            finally:
                transaction.rollback()
class ThreadTests(TestCase):

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        connections_set = set()
        connection.cursor()
        connections_set.add(connection)

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_set.add(connection)

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection.
        # Main thread + 2 worker threads = 3 distinct raw connections.
        self.assertEqual(
            len(set([conn.connection for conn in connections_set])),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_set:
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        connections_set = set()
        for conn in connections.all():
            connections_set.add(conn)

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_set.add(conn)

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # NOTE(review): the expected total of 6 implies the test settings
        # configure multiple database aliases per thread — confirm against
        # the test-suite DATABASES configuration.
        self.assertEqual(len(connections_set), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_set:
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()

        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)

        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)

        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)

        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for automatic primary key.
    """

    @skipIfDBFeature('allows_primary_key_0')
    def test_zero_as_autoval(self):
        # Backends without the feature must reject an explicit pk of 0.
        with self.assertRaises(ValueError):
            models.Square.objects.create(id=0, root=0, square=1)
|
utils.py | import contextlib
import errno
import importlib
import itertools
import json
import os
import queue
import sys
import click
import click_threading
from atomicwrites import atomic_write
from . import cli_logger
from .. import BUGTRACKER_HOME
from .. import DOCS_HOME
from .. import exceptions
from ..sync.exceptions import IdentConflict
from ..sync.exceptions import PartialSync
from ..sync.exceptions import StorageEmpty
from ..sync.exceptions import SyncConflict
from ..sync.status import SqliteStatus
from ..utils import expand_path
from ..utils import get_storage_init_args
# File/directory modes for status data: accessible only by the owner
# (status files may reference private calendar/contact data).
STATUS_PERMISSIONS = 0o600
STATUS_DIR_PERMISSIONS = 0o700
class _StorageIndex:
    """Lazy registry mapping storage type names to storage classes.

    Entries start out as dotted import paths and are replaced by the real
    class on first access, so a backend (and its dependencies) is only
    imported when actually used.
    """

    def __init__(self):
        self._storages = dict(
            caldav='vdirsyncer.storage.dav.CalDAVStorage',
            carddav='vdirsyncer.storage.dav.CardDAVStorage',
            filesystem='vdirsyncer.storage.filesystem.FilesystemStorage',
            http='vdirsyncer.storage.http.HttpStorage',
            lotusnotesweb=(
                'vdirsyncer.storage.lotusnotesweb.LotusNotesWebStorage'),
            singlefile='vdirsyncer.storage.singlefile.SingleFileStorage',
            google_calendar='vdirsyncer.storage.google.GoogleCalendarStorage',
            google_contacts='vdirsyncer.storage.google.GoogleContactsStorage',
            etesync_calendars='vdirsyncer.storage.etesync.EtesyncCalendars',
            etesync_contacts='vdirsyncer.storage.etesync.EtesyncContacts'
        )

    def __getitem__(self, name):
        item = self._storages[name]
        if not isinstance(item, str):
            # Already resolved to a class.
            return item

        modname, clsname = item.rsplit('.', 1)
        mod = importlib.import_module(modname)
        # Cache the resolved class for subsequent lookups.
        self._storages[name] = rv = getattr(mod, clsname)
        assert rv.storage_name == name
        return rv


# Single module-level registry; the class itself is not part of the API.
storage_names = _StorageIndex()
del _StorageIndex
class JobFailed(RuntimeError):
    """Signals that a queued worker job failed.

    Silently swallowed by handle_cli_error (alongside click.Abort and
    KeyboardInterrupt), since the failure was already reported.
    """
    pass
def handle_cli_error(status_name=None, e=None):
    '''
    Print a useful error message for the current exception.

    This is supposed to catch all exceptions, and should never raise any
    exceptions itself.
    '''

    try:
        # Re-raise so the except-chain below dispatches on the type.  With
        # no explicit `e` this must be called from inside an `except` block.
        if e is not None:
            raise e
        else:
            raise
    except exceptions.UserError as e:
        cli_logger.critical(e)
    except StorageEmpty as e:
        cli_logger.error(
            '{status_name}: Storage "{name}" was completely emptied. If you '
            'want to delete ALL entries on BOTH sides, then use '
            '`vdirsyncer sync --force-delete {status_name}`. '
            'Otherwise delete the files for {status_name} in your status '
            'directory.'.format(
                name=e.empty_storage.instance_name,
                status_name=status_name
            )
        )
    except PartialSync as e:
        cli_logger.error(
            '{status_name}: Attempted change on {storage}, which is read-only'
            '. Set `partial_sync` in your pair section to `ignore` to ignore '
            'those changes, or `revert` to revert them on the other side.'
            .format(status_name=status_name, storage=e.storage)
        )
    except SyncConflict as e:
        cli_logger.error(
            '{status_name}: One item changed on both sides. Resolve this '
            'conflict manually, or by setting the `conflict_resolution` '
            'parameter in your config file.\n'
            'See also {docs}/config.html#pair-section\n'
            'Item ID: {e.ident}\n'
            'Item href on side A: {e.href_a}\n'
            'Item href on side B: {e.href_b}\n'
            .format(status_name=status_name, e=e, docs=DOCS_HOME)
        )
    except IdentConflict as e:
        cli_logger.error(
            '{status_name}: Storage "{storage.instance_name}" contains '
            'multiple items with the same UID or even content. Vdirsyncer '
            'will now abort the synchronization of this collection, because '
            'the fix for this is not clear; It could be the result of a badly '
            'behaving server. You can try running:\n\n'
            '    vdirsyncer repair {storage.instance_name}\n\n'
            'But make sure to have a backup of your data in some form. The '
            'offending hrefs are:\n\n{href_list}\n'
            .format(status_name=status_name,
                    storage=e.storage,
                    href_list='\n'.join(map(repr, e.hrefs)))
        )
    except (click.Abort, KeyboardInterrupt, JobFailed):
        # Already reported or user-initiated; stay silent.
        pass
    except exceptions.PairNotFound as e:
        cli_logger.error(
            'Pair {pair_name} does not exist. Please check your '
            'configuration file and make sure you\'ve typed the pair name '
            'correctly'.format(pair_name=e.pair_name)
        )
    except exceptions.InvalidResponse as e:
        cli_logger.error(
            'The server returned something vdirsyncer doesn\'t understand. '
            'Error message: {!r}\n'
            'While this is most likely a serverside problem, the vdirsyncer '
            'devs are generally interested in such bugs. Please report it in '
            'the issue tracker at {}'
            .format(e, BUGTRACKER_HOME)
        )
    except exceptions.CollectionRequired:
        cli_logger.error(
            'One or more storages don\'t support `collections = null`. '
            'You probably want to set `collections = ["from a", "from b"]`.'
        )
    except Exception as e:
        # Last resort: short message at error level, traceback only at debug.
        tb = sys.exc_info()[2]
        import traceback
        tb = traceback.format_tb(tb)
        if status_name:
            msg = f'Unknown error occurred for {status_name}'
        else:
            msg = 'Unknown error occurred'
        msg += f': {e}\nUse `-vdebug` to see the full traceback.'
        cli_logger.error(msg)
        cli_logger.debug(''.join(tb))
def get_status_name(pair, collection):
    """Return the status identifier for *pair*, qualified by *collection*.

    A pair without a collection maps to the bare pair name; otherwise the
    collection name is appended with a ``/`` separator.
    """
    return pair if collection is None else f'{pair}/{collection}'
def get_status_path(base_path, pair, collection=None, data_type=None):
    """Return the on-disk path of a status file, migrating legacy names.

    Old-style status files lacked the data-type suffix; when one is found
    for the 'items' type it is renamed in place before the suffixed path
    is returned.
    """
    assert data_type is not None

    status_name = get_status_name(pair, collection)
    path = expand_path(os.path.join(base_path, status_name))

    if os.path.isfile(path) and data_type == 'items':
        new_path = path + '.items'
        # XXX: Legacy migration
        cli_logger.warning('Migrating statuses: Renaming {} to {}'
                           .format(path, new_path))
        os.rename(path, new_path)

    path += '.' + data_type
    return path
def load_status(base_path, pair, collection=None, data_type=None):
    """Load a JSON status file as a dict.

    Returns None if the file does not exist, and an empty dict if it
    exists but holds invalid JSON.

    NOTE(review): ``assert_permissions`` is not imported in this chunk;
    presumably defined later in this module — verify.
    """
    path = get_status_path(base_path, pair, collection, data_type)
    if not os.path.exists(path):
        return None
    assert_permissions(path, STATUS_PERMISSIONS)

    with open(path) as f:
        try:
            return dict(json.load(f))
        except ValueError:
            # Corrupt/partial file: treat as empty status.
            pass

    return {}
def prepare_status_path(path):
    """Ensure the parent directory of *path* exists with restricted perms."""
    # exist_ok=True suppresses exactly FileExistsError (errno EEXIST), the
    # same condition the previous explicit errno check tolerated; any other
    # OSError still propagates.
    os.makedirs(os.path.dirname(path), STATUS_DIR_PERMISSIONS, exist_ok=True)
@contextlib.contextmanager
def manage_sync_status(base_path, pair_name, collection_name):
    """Yield a SqliteStatus for the given pair/collection.

    If the status file still holds the old JSON format (first byte '{'),
    its contents are loaded, the file is deleted and re-created as an
    sqlite status seeded with the legacy data.
    """
    path = get_status_path(base_path, pair_name, collection_name, 'items')
    status = None

    legacy_status = None
    try:
        # XXX: Legacy migration — sniff the first byte for a JSON object.
        with open(path, 'rb') as f:
            if f.read(1) == b'{':
                f.seek(0)
                legacy_status = dict(json.load(f))
    except (OSError, ValueError):
        # Missing file or non-JSON content: nothing to migrate.
        pass

    if legacy_status is not None:
        cli_logger.warning('Migrating legacy status to sqlite')
        os.remove(path)

        status = SqliteStatus(path)
        status.load_legacy_status(legacy_status)
    else:
        prepare_status_path(path)
        status = SqliteStatus(path)

    yield status
def save_status(base_path, pair, collection=None, data_type=None, data=None):
    """Atomically write *data* as JSON to the pair's status file.

    The write goes through a temp file (atomic_write) and the final file
    is chmod'ed to STATUS_PERMISSIONS.
    """
    assert data_type is not None
    assert data is not None
    status_name = get_status_name(pair, collection)
    path = expand_path(os.path.join(base_path, status_name)) + '.' + data_type
    prepare_status_path(path)
    with atomic_write(path, mode='w', overwrite=True) as f:
        json.dump(data, f)
    os.chmod(path, STATUS_PERMISSIONS)
def storage_class_from_config(config):
    """Split *config* into ``(storage_class, remaining_kwargs)``.

    The ``type`` key selects the class from ``storage_names``; all other
    keys are left in the returned config dict.
    """
    remaining = dict(config)
    storage_name = remaining.pop('type')
    if storage_name not in storage_names:
        raise exceptions.UserError(
            f'Unknown storage type: {storage_name}')
    return storage_names[storage_name], remaining
def storage_instance_from_config(config, create=True):
    '''
    Instantiate a storage from its configuration dict.

    :param config: A configuration dictionary to pass as kwargs to the class
        corresponding to config['type']
    :param create: When True and the collection does not exist, offer to
        create it and retry once (recursing with create=False so we never
        loop).
    '''
    cls, new_config = storage_class_from_config(config)
    try:
        return cls(**new_config)
    except exceptions.CollectionNotFound as e:
        if create:
            config = handle_collection_not_found(
                config, config.get('collection', None), e=str(e))
            return storage_instance_from_config(config, create=False)
        else:
            raise
    except Exception:
        # Helper inspects the in-flight exception; it re-raises anything
        # that is not a constructor-argument TypeError.
        return handle_storage_init_error(cls, new_config)
def handle_storage_init_error(cls, config):
    """Translate a storage-class __init__ failure into a friendly UserError.

    Must be called from inside an ``except`` block: it inspects the
    in-flight exception via ``sys.exc_info()``.  Only TypeErrors that
    mention ``__init__`` (wrong constructor arguments) are translated;
    anything else is re-raised unchanged.
    """
    e = sys.exc_info()[1]
    if not isinstance(e, TypeError) or '__init__' not in repr(e):
        raise
    # Renamed from `all`, which shadowed the builtin.
    all_args, required = get_storage_init_args(cls)
    given = set(config)
    missing = required - given
    invalid = given - all_args
    problems = []
    if missing:
        problems.append(
            '{} storage requires the parameters: {}'
            .format(cls.storage_name, ', '.join(missing)))
    if invalid:
        problems.append(
            '{} storage doesn\'t take the parameters: {}'
            .format(cls.storage_name, ', '.join(invalid)))
    if not problems:
        raise e
    raise exceptions.UserError(
        'Failed to initialize {}'.format(config['instance_name']),
        problems=problems
    )
class WorkerQueue:
    '''
    A simple worker-queue setup.
    Note that workers quit if queue is empty. That means you have to first put
    things into the queue before spawning the worker!
    '''
    def __init__(self, max_workers):
        # max_workers of 0/None means "no cap" (see spawn_worker).
        self._queue = queue.Queue()
        self._workers = []
        self._max_workers = max_workers
        self._shutdown_handlers = []
        # According to http://stackoverflow.com/a/27062830, those are
        # threadsafe compared to increasing a simple integer variable.
        self.num_done_tasks = itertools.count()
        self.num_failed_tasks = itertools.count()
    def shutdown(self):
        # Pop and run every registered handler; a failing handler must not
        # stop the remaining ones from running.
        while self._shutdown_handlers:
            try:
                self._shutdown_handlers.pop()()
            except Exception:
                pass
    def _worker(self):
        # Worker loop: drain the queue until empty, then exit.
        while True:
            try:
                func = self._queue.get(False)
            except queue.Empty:
                break
            try:
                # Tasks receive the queue itself so they can enqueue more
                # work or spawn more workers.
                func(wq=self)
            except Exception:
                handle_cli_error()
                next(self.num_failed_tasks)
            finally:
                self._queue.task_done()
                next(self.num_done_tasks)
        # Last worker out (no unfinished tasks) triggers shutdown.
        if not self._queue.unfinished_tasks:
            self.shutdown()
    def spawn_worker(self):
        # Respect the worker cap; a falsy _max_workers disables the cap.
        if self._max_workers and len(self._workers) >= self._max_workers:
            return
        t = click_threading.Thread(target=self._worker)
        t.start()
        self._workers.append(t)
    @contextlib.contextmanager
    def join(self):
        # The caller enqueues tasks / spawns workers inside the `with`
        # body; on exit the UI worker pumps click prompts on the main
        # thread while the queue drains.
        assert self._workers or not self._queue.unfinished_tasks
        ui_worker = click_threading.UiWorker()
        self._shutdown_handlers.append(ui_worker.shutdown)
        _echo = click.echo
        with ui_worker.patch_click():
            yield
            if not self._workers:
                # Ugly hack, needed because ui_worker is not running.
                click.echo = _echo
                cli_logger.critical('Nothing to do.')
                sys.exit(5)
            ui_worker.run()
            self._queue.join()
            for worker in self._workers:
                worker.join()
            tasks_failed = next(self.num_failed_tasks)
            tasks_done = next(self.num_done_tasks)
            if tasks_failed > 0:
                cli_logger.error('{} out of {} tasks failed.'
                                 .format(tasks_failed, tasks_done))
                sys.exit(1)
    def put(self, f):
        # Enqueue a callable; it will be invoked as f(wq=self).
        return self._queue.put(f)
def assert_permissions(path, wanted):
    """Tighten the mode of *path* to *wanted* if it is more permissive."""
    current = os.stat(path).st_mode & 0o777
    if current <= wanted:
        return
    cli_logger.warning('Correcting permissions of {} from {:o} to {:o}'
                       .format(path, current, wanted))
    os.chmod(path, wanted)
def handle_collection_not_found(config, collection, e=None):
    """Interactively offer to create a missing collection.

    Returns an updated storage config (with ``type`` restored) when the
    collection is created; raises UserError when creation is unsupported
    or the user declines.
    """
    storage_name = config.get('instance_name', None)
    cli_logger.warning('{}No collection {} found for storage {}.'
                       .format(f'{e}\n' if e else '',
                               json.dumps(collection), storage_name))
    if click.confirm('Should vdirsyncer attempt to create it?'):
        storage_type = config['type']
        cls, config = storage_class_from_config(config)
        config['collection'] = collection
        try:
            args = cls.create_collection(**config)
            # storage_class_from_config popped 'type'; put it back so the
            # result is a full storage config.
            args['type'] = storage_type
            return args
        except NotImplementedError as e:
            cli_logger.error(e)
    raise exceptions.UserError(
        'Unable to find or create collection "{collection}" for '
        'storage "{storage}". Please create the collection '
        'yourself.'.format(collection=collection,
                           storage=storage_name))
|
server.py | import os
import logging
import json
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import grpc
from google.protobuf import any_pb2
import sys
import time
from threading import Thread
import redis
import boto3
import cache
import service_pb2
import service_pb2_grpc
# Lazily-initialized boto3 S3 client; assigned in init().
s3client = None
class ProcessItem(BaseModel):
    # Request body for POST /portrait/process: one user's click events.
    # ID of the user whose portrait should be updated.
    user_id: str
    # Item IDs the user clicked.  NOTE(review): mutable default relies on
    # pydantic copying defaults per instance — confirm pydantic version.
    clicked_item_ids: list = []
app = FastAPI()
# Mandatory variables in environment; these defaults are replaced by the
# environment values (as strings) in init().
MANDATORY_ENV_VARS = {
    'AWS_REGION': 'ap-northeast-1',
    'REDIS_HOST' : 'localhost',
    'REDIS_PORT' : 6379,
    'PORTRAIT_PORT': 5300
}
sleep_interval = 10 #second
# Redis stream names consumed by the background readers below.
pickle_type = 'inverted-list'
action_model_type = 'action-model'
def xasync(f):
    """Decorator: run the wrapped function in a background thread.

    The wrapper now returns the started ``Thread`` (previously it
    returned None) so callers may join it; existing callers that ignore
    the return value are unaffected.
    """
    import functools
    @functools.wraps(f)  # preserve the wrapped function's name/docs
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
@app.get('/portrait/status', tags=["monitoring"])
def status():
    """Aggregate env config, redis connectivity and plugin status."""
    logging.info('Collecting status information from server & plugin...')
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.PortraitStub(channel)
    response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
    # The plugin packs its status as a JSON payload inside a protobuf Any.
    statusAny = any_pb2.Any()
    response.status.Unpack(statusAny)
    pStatus = json.loads(statusAny.value.decode('utf-8'))
    return {
        'env': MANDATORY_ENV_VARS,
        'redis': rCache.connection_status(),
        'plugin_status': pStatus
    }
@app.get('/ping', tags=["monitoring"])
def ping():
    """Liveness probe: always answers pong."""
    logging.info('Processing default request')
    return {"result": "pong"}
@app.get('/portrait/userid/{user_id}', tags=["portrait"])
async def get_portrait(user_id: str):
    """Fetch a user's portrait from the plugin service over gRPC."""
    logging.info('Searching %s - user portrait from cache ...', user_id)
    # Get data from plugin service
    gPortraitRequest = service_pb2.GetPortraitRequest(apiVersion='v1',
        metadata='Portrait', type='GetPortrait', userId=user_id)
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.PortraitStub(channel)
    response = stub.GetPortrait(gPortraitRequest)
    # Unpack the Any payload once (the original unpacked it twice).
    results = any_pb2.Any()
    response.results.Unpack(results)
    # json.loads() lost its `encoding` kwarg in Python 3.9; decode explicitly.
    resultJson = json.loads(results.value.decode('utf-8'))
    return {
        'code': response.code,
        'description': response.description,
        'results': resultJson
    }
@app.post('/portrait/process', tags=["portrait_to_plugin"])
def update_portrait(processItem: ProcessItem):
    """Forward a user's click events to the plugin to update the portrait."""
    logging.info('Start update_portrait() ...')
    user_id = processItem.user_id
    clicked_item_ids = processItem.clicked_item_ids
    logging.info('user_id -> %s', user_id)
    logging.info('clicked_item_ids -> %s', clicked_item_ids)
    # The request payload travels as JSON inside a protobuf Any.
    reqDicts = any_pb2.Any()
    reqDicts.value = json.dumps({
        'user_id': user_id,
        'clicked_item_ids': clicked_item_ids
    }).encode('utf-8')
    logging.info('Invoke plugin to update portrait...')
    updateRequest = service_pb2.UpdatePortraitRequest(apiVersion='v1',
        metadata='Portrait', type='UpdatePortrait')
    updateRequest.dicts.Pack(reqDicts)
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.PortraitStub(channel)
    response = stub.UpdatePortrait(updateRequest)
    results = any_pb2.Any()
    response.results.Unpack(results)
    # json.loads() lost its `encoding` kwarg in Python 3.9; decode explicitly.
    resultJson = json.loads(results.value.decode('utf-8'))
    return {
        'code': response.code,
        'description': response.description,
        'results': resultJson
    }
def read_stream_messages():
    """Spawn the background redis-stream readers (each is @xasync-decorated)."""
    logging.info('read_stream_messages start')
    for reader in (read_pickle_message, read_action_model_message):
        reader()
@xasync
def read_action_model_message():
    """Background loop consuming action-model notifications from redis."""
    logging.info('read_action_model_message start')
    # Read existed stream message
    stream_message = rCache.read_stream_message(action_model_type)
    if stream_message:
        logging.info("Handle existed stream action_model_type message")
        handle_stream_message(stream_message)
    while True:
        logging.info('wait for reading action_model_type message')
        try:
            # Blocking read; returns when a new stream entry arrives.
            stream_message = rCache.read_stream_message_block(action_model_type)
            if stream_message:
                handle_stream_message(stream_message)
        except redis.ConnectionError:
            # Redis hiccup: log and retry after a back-off.
            localtime = time.asctime( time.localtime(time.time()))
            logging.info('get ConnectionError, time: {}'.format(localtime))
            time.sleep( sleep_interval )
@xasync
def read_pickle_message():
    """Background loop consuming inverted-list (pickle) notifications from redis."""
    logging.info('read_pickle_message start')
    # Read existed stream message
    stream_message = rCache.read_stream_message(pickle_type)
    if stream_message:
        logging.info("Handle existed stream pickle_type message")
        handle_stream_message(stream_message)
    while True:
        logging.info('wait for reading pickle_type message')
        localtime = time.asctime( time.localtime(time.time()))
        logging.info('start read stream: time: {}'.format(localtime))
        try:
            # Blocking read; returns when a new stream entry arrives.
            stream_message = rCache.read_stream_message_block(pickle_type)
            if stream_message:
                handle_stream_message(stream_message)
        except redis.ConnectionError:
            # Redis hiccup: log and retry after a back-off.
            localtime = time.asctime( time.localtime(time.time()))
            logging.info('get ConnectionError, time: {}'.format(localtime))
            time.sleep( sleep_interval )
def handle_stream_message(stream_message):
    """Parse a redis stream entry and tell the plugin to reload that data."""
    logging.info('get stream message from {}'.format(stream_message))
    file_type, file_path, file_list = parse_stream_message(stream_message)
    logging.info('start reload data process in handle_stream_message')
    logging.info('file_type {}'.format(file_type))
    logging.info('file_path {}'.format(file_path))
    logging.info('file_list {}'.format(file_list))
    # Reload request payload travels as JSON inside a protobuf Any.
    reqDicts = any_pb2.Any()
    reqDicts.value = json.dumps({
        'file_type': file_type,
        'file_list': file_list
    }).encode('utf-8')
    reloadRequest = service_pb2.ReloadRequest()
    reloadRequest.dicts.Pack(reqDicts)
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.PortraitStub(channel)
    response = stub.Reload(reloadRequest)
    # Plugin convention here: code 0 means success.
    if response.code == 0:
        logging.info('reload plugin succeeded')
    else:
        logging.info('reload plugin failed, description: {}'.format(response.description))
def parse_stream_message(stream_message):
    """Extract (file_type, file_path, file_list) from the first stream entry.

    NOTE(review): implicitly returns None when *stream_message* is empty,
    which would break the caller's tuple unpacking — confirm callers only
    pass non-empty messages.
    """
    for stream_name, message in stream_message:
        for message_id, value in message:
            # Values arrive as bytes from redis; convert() decodes them.
            decode_value = convert(value)
            file_type = decode_value['file_type']
            file_path = decode_value['file_path']
            file_list = decode_value['file_list']
            return file_type, file_path, file_list
# convert stream data to str
def convert(data):
    """Recursively decode redis stream data (bytes) into str.

    Handles bytes, dicts (keys and values) and tuples; any other type is
    returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('ascii')
    elif isinstance(data, dict):
        return dict(map(convert, data.items()))
    elif isinstance(data, tuple):
        # Bug fix: map() returns a lazy iterator, not a tuple; materialize
        # it so callers get a real tuple back.
        return tuple(map(convert, data))
    else:
        return data
def check_plugin_status():
    """Return True if the gRPC plugin answers its Status call with code 0."""
    logging.info('check plugin status')
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.PortraitStub(channel)
    response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
    if response.code == 0:
        logging.info('plugin startup succeed')
        return True
    else:
        logging.info('plugin startup failed')
        return False
def wait_for_plugin_service():
    """Block until the gRPC plugin reports a healthy status."""
    while not check_plugin_status():
        logging.info('wait for plugin startup')
        time.sleep(sleep_interval)
def init():
    """One-time startup: env overrides, AWS session, redis, plugin handshake."""
    # Check out environments
    for var in MANDATORY_ENV_VARS:
        if var not in os.environ:
            logging.error("Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
        else:
            # NOTE(review): environment values are strings, so numeric
            # defaults (e.g. PORTRAIT_PORT) become str after this.
            MANDATORY_ENV_VARS[var]=os.environ.get(var)
    aws_region = MANDATORY_ENV_VARS['AWS_REGION']
    logging.info("aws_region={}".format(aws_region))
    boto3.setup_default_session(region_name=MANDATORY_ENV_VARS['AWS_REGION'])
    global s3client
    s3client = boto3.client('s3')
    logging.info(json.dumps(s3client.list_buckets(), default=str))
    # Initial redis connection
    global rCache
    rCache = cache.RedisCache(host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
    logging.info('redis status is {}'.format(rCache.connection_status()))
    wait_for_plugin_service()
    logging.info('portrait start!')
    read_stream_messages()
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    init()
    # PORTRAIT_PORT is a string once overridden from the environment in
    # init(); uvicorn requires an int port.
    uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['PORTRAIT_PORT']))
|
pystats_v8.py | #!/usr/bin/env python3
#Needs PIP3 install pandas numpy
from slickrpc import Proxy
import time
import sys
import datetime
import os
from subprocess import check_output, CalledProcessError
import json
import re
import platform
import calendar
import pandas as pd
import numpy as np
from threading import Thread
# NOTE(review): utxoamt, ntrzdamt and txscanamount are not referenced in
# this file's visible code — confirm before removing.
utxoamt = 0.00010000
ntrzdamt = -0.00083600
# KMD address compared against transaction outputs in rpcThreads().
kmdntrzaddr = "RXL3YXG2ceaB6C5hfJcN4fvmLH2C34knhA"
txscanamount = 77777
# define function that fetches rpc creds from .conf
def def_credentials(chain):
    """Build an RPC proxy for *chain* from its komodo .conf credentials."""
    operating_system = platform.system()
    if operating_system == 'Darwin':
        ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
    elif operating_system == 'Linux':
        ac_dir = os.environ['HOME'] + '/.komodo'
    elif operating_system == 'Windows':
        ac_dir = '%s/komodo/' % os.environ['APPDATA']
    if chain == 'KMD':
        coin_config_file = str(ac_dir + '/komodo.conf')
    else:
        coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
    rpcport = ''
    with open(coin_config_file, 'r') as f:
        for line in f:
            stripped = line.rstrip()
            if re.search('rpcuser', stripped):
                rpcuser = stripped.replace('rpcuser=', '')
            elif re.search('rpcpassword', stripped):
                rpcpassword = stripped.replace('rpcpassword=', '')
            elif re.search('rpcport', stripped):
                rpcport = stripped.replace('rpcport=', '')
    if len(rpcport) == 0:
        if chain == 'KMD':
            # komodod's default RPC port when the conf omits it.
            rpcport = 7771
        else:
            print("rpcport not in conf file, exiting")
            print("check " + coin_config_file)
            exit(1)
    return Proxy("http://%s:%s@127.0.0.1:%d" % (rpcuser, rpcpassword, int(rpcport)))
def rpcThreads(chain, t0,):
    """Collect per-chain stats over RPC and append a row to the global lists.

    Runs in a worker thread per chain (see print_balance); results are
    appended to the module-level tmpList / alt_tmpList.
    """
    #Get the chain RPC credentials
    rpc_connection = def_credentials(chain)
    myaddress = r_addr
    my_ntz_count = 0
    #Run the RPC commands to get base info
    try:
        chain_getinfo = rpc_connection.getinfo()
        unspents = pd.DataFrame(rpc_connection.listunspent())
        notaries = rpc_connection.getnotarysendmany()
        notaries[kmdntrzaddr]=0
        local_addrs = rpc_connection.getaddressesbyaccount("")
        #Query the RPC replies and manipulate for the list
        chain_balance = chain_getinfo ["balance"]
        chain_blocks = chain_getinfo ["blocks"]
        chain_last_blox = rpc_connection.getblock(str(chain_blocks))
        chain_ntzblk = chain_getinfo ["notarized"]
        chain_cncts = chain_getinfo ["connections"]
        blox_time = chain_last_blox ["time"]
    except Exception as e:
        # RPC failure: fill the row with error markers so the table still renders.
        chain_balance = "error"
        chain_blocks = 0
        chain_ntzblk = "error"
        chain_cncts = e
        blox_time = t0
    try:
        # Notary-sized UTXOs (0.0001) held by this node.
        nnutxoset = unspents[unspents['amount']==0.0001]
    except Exception as e:
        unspents = []
        nnutxoset = []
    try:
        if chain != 'KMD':
            # Look at the last 64 blocks of wallet txs for notarization activity.
            transactions = pd.DataFrame(rpc_connection.listtransactions("",(chain_blocks - 64)))
            nntransactions = transactions[transactions['address']==kmdntrzaddr]
            if len(nntransactions) > 0:
                nntxbyconfirms = nntransactions.sort_values('confirmations',ascending=True)
                t1 = nntxbyconfirms['time'].values[0]
                readabletime = to_readabletime(t0,t1)
                my_ntz_count = len(nntransactions)
            else:
                readabletime = ""
        else:
            readabletime = ""
            my_ntz_count = ""
            #transactions = pd.DataFrame(rpc_connection.listtransactions("",(15)))
            #nntransactions = transactions[transactions['address']==kmdntrzaddr]
            #if len(nntransactions) > 0:
            #    nntxbyconfirms = nntransactions.sort_values('confirmations',ascending=True)
            #    t1 = nntxbyconfirms['time'].values[0]
            #    readabletime = to_readabletime(t0,t1)
            #    my_ntz_count = ""
            #else:
            #    readabletime = ""
    except Exception as e:
        nntransactions = []
        readabletime = "error"
    if chain != 'KMD':
        if myaddress in notaries:
            # Tally notary payment outputs found in each coinbase tx.
            for block in range(64, chain_blocks):
                getblock_result = rpc_connection.getblock(str(block), 2)
                if len(getblock_result['tx'][0]['vout']) > 1:
                    vouts = getblock_result['tx'][0]['vout']
                    for vout in vouts[1:]:
                        blah = vout['scriptPubKey']['addresses'][0]
                        if blah in notaries:
                            notaries[blah] += 1
                        else:
                            print('what')
            all_ntrz_df = pd.DataFrame(notaries.items(), columns=['notary','count'])
            #all_ntrz_count = all_ntrz_df.sum(axis = 0, skipna=True) ['count']
            #print(all_ntrz_df)
            #print(chain + " Notary Payments in timeframe: " + str(all_ntrz_count))
            #addresses = rpc_connection.listaddressgroupings()
            #myaddress = addresses[0][0][0]
            my_pymt_count = all_ntrz_df.loc[all_ntrz_df['notary'] == myaddress, 'count'].sum()
            #if all_ntrz_count < 1:
            #    all_ntrz_count = 1
            #pct_ntrz = (my_ntz_count / all_ntrz_count) * 100
            #pct_ntrz = 1
        else:
            my_pymt_count = ""
            #pct_ntrz = ""
    else:
        #pct_ntrz = ""
        my_pymt_count = ""
    try:
        blocktime = to_readabletime(t0, blox_time,)
    except Exception as e:
        print(e)
    #Build and append list items
    list = (chain,chain_balance,len(unspents),len(nnutxoset),my_pymt_count,my_ntz_count,readabletime,chain_blocks,blocktime,chain_ntzblk,chain_cncts)
    alt_list = (chain,chain_balance,len(unspents),chain_blocks,blocktime,chain_ntzblk,chain_cncts)
    global tmpList
    tmpList.append(list)
    global alt_tmpList
    alt_tmpList.append(alt_list)
def to_readabletime(t_zero, t_one,):
    """Format the elapsed seconds between two epoch timestamps.

    Output grows with magnitude: "Ns", "Mm:Ss", "Hh:Mm" or "Dd:Hh:Mm";
    anything below one second is "0s".
    """
    remaining = float(int(t_zero) - int(t_one))
    if remaining < 1:
        return "0s"
    days, remaining = divmod(remaining, 24 * 3600)
    hours, remaining = divmod(remaining, 3600)
    minutes, seconds = divmod(remaining, 60)
    if days:
        return "%dd:%dh:%dm" % (days, hours, minutes)
    if hours:
        return "%dh:%dm" % (hours, minutes)
    if minutes:
        return "%dm:%ds" % (minutes, seconds)
    return "%ds" % (seconds)
def print_balance():
    """Spawn one rpcThreads worker per running chain, then print the table."""
    now = datetime.datetime.now()
    print("Latest stats " + (now.strftime("%Y-%m-%d %H:%M:%S")))
    t0 = time.time()
    tableCol = ['ASSET','BALANCE','UTXO','nnUTXO','PYMTS','NOTR','NOTR_t','chnBLOX','BLOX_t','NtrzHT','CNCT']
    alt_tableCol = ['ASSET','BALANCE','UTXO','chnBLOX','BLOX_t','NtrzHT','CNCT']
    #Create the thread loops
    for chain in assetChains:
        process = Thread(target=rpcThreads, args=[chain, t0,])
        process.start()
        threads.append(process)
    #Destroy the thread loops
    for process in threads:
        process.join()
    #Format the table and print
    pd.set_option('display.width', None)
    pd.set_option('display.max_columns',12)
    pd.set_option('precision', 4)
    pd.set_eng_float_format(accuracy=4, use_eng_prefix=True)
    #Assemble the table
    df = pd.DataFrame.from_records(tmpList, columns=tableCol)
    # When no notary UTXOs exist anywhere, fall back to the shorter table.
    if (df.sum(axis = 0, skipna=True) ['nnUTXO']) == 0:
        df = pd.DataFrame.from_records(alt_tmpList, columns=alt_tableCol)
    df.sort_values(by=['chnBLOX'], ascending=False, inplace=True)
    df = df.reset_index(drop=True)
    print(df)
    print("")
# Define a function to check if chains are running
def running(chain):
    """Return True if a komodod process for *chain* appears to be running.

    Scans `pidof komodod` PIDs and matches the chain name in each
    process's command line; for 'KMD' itself, compares against the PID
    recorded in komodod.pid.  Returns a falsy value (None/False) when no
    match is found.
    """
    chain_name = chain
    try:
        pidlist = pd.DataFrame((list(map(int, check_output(["pidof", 'komodod']).split()))), columns=['PID'])
    except CalledProcessError:
        # Bug fix: use an empty DataFrame (not a plain list) so the
        # .itertuples() call below still works when no komodod runs.
        pidlist = pd.DataFrame(columns=['PID'])
    for row in pidlist.itertuples():
        output = str(check_output(['ps', '-p', str(row.PID), '-o', 'cmd=']))
        if str(chain_name) in output:
            return True
        elif str(chain_name) == 'KMD':
            operating_system = platform.system()
            if operating_system == 'Darwin':
                ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
            elif operating_system == 'Linux':
                ac_dir = os.environ['HOME'] + '/.komodo'
            elif operating_system == 'Win64':
                ac_dir = os.getenv('APPDATA') + '/komodo'
            KMD_pid_file = str(ac_dir + '/komodod.pid')
            try:
                with open(KMD_pid_file) as file:
                    KMD_pid = file.read()
            except Exception:
                return False
            if int(row.PID) == int(KMD_pid):
                return True
    return None
#================================= Main Program =================================#
threads = []
assetChains = []
tmpList = []
alt_tmpList = []
HOME = os.environ['HOME']
#Get the chain names from the json file (two candidate install locations)
try:
    with open(HOME + '/StakedNotary/assetchains.json') as file:
        assetchains = json.load(file)
except Exception as e:
    #print(e)
    #print("Trying alternate location for file")
    with open(HOME + '/staked/assetchains.json') as file:
        assetchains = json.load(file)
#Get the R addr from the config.ini
# NOTE(review): r_addr stays unbound if neither config.ini has a Radd
# line, making the print below raise NameError — confirm configs always
# contain it.
try:
    with open(HOME + '/StakedNotary/config.ini', 'r') as file:
        for line in file:
            l = line.rstrip()
            if re.search('Radd', l):
                r_addr = l.replace('Radd = ', '')
except Exception as e:
    #print(e)
    #print("Trying alternate location for file")
    with open(HOME + '/staked/config.ini', 'r') as file:
        for line in file:
            l = line.rstrip()
            if re.search('Radd', l):
                r_addr = l.replace('Radd =', '')
print("\nRadd = " + r_addr)
#Build the list of chains to report on
if running('KMD') == True:
    assetChains.append('KMD')
for chain in assetchains:
    if running(chain['ac_name']) == True:
        assetChains.append(chain['ac_name'])
#Run the loops and threads
print_balance() |
app_manager.py | import json
import random
from datetime import datetime
from io import UnsupportedOperation
from os.path import join
from pathlib import Path
import numpy as np
from kivy.app import App
from kivy.clock import Clock
# from kivy.core.window import Window
from kivy.lang import Builder
from communicator import Communicator
from headless_utils import Diver, Player, PrintScore1Player
from headless_utils import Fish as FishHeadless
from headless_utils import JellySmile as JellySmileHeadless
from shared import SettingLoader
from widgets import Fish, FishingDerby, GreenDiver, TimeBoard, Stats, JellySmile
home = str(Path.home())
class FishingDerbyRLApp(App, SettingLoader, Communicator, PrintScore1Player):
    def __init__(self, headless=True):
        """Set up game state; *headless* skips all Kivy UI initialization."""
        SettingLoader.__init__(self)
        Communicator.__init__(self)
        # self.show_every_n_episodes = 10
        self.headless = headless
        if not self.headless:
            super().__init__()
        self.jellys = {}                # Jellyfish widgets/objects by name
        self.p1_rl_model = None
        self.king_fish = None
        self.state2ind = None           # state index -> (i, j) grid cell
        self.ind2state = None           # (i, j) grid cell -> state index
        self.actions = None             # action name -> (dx, dy)
        self.allowed_moves = None
        self.actions2ind = None         # action name -> action index
        self.exploration = True
        self.policy = None
        self.episode_len_count = 0
        self.episode_len = None
        self.game_over = False
        # Create class variables and set default values
        # (king_fish is re-assigned here; harmless duplicate of the line above)
        self.king_fish = None
        self.fishes = {}  # Dictionary of fishes
        self._cnt_steps = 0  # Count of the number of steps taken so far
        self.move_x = []  # Next moves of the fishes in the x axis
        self.move_y = []  # Next moves of the fishes in the y axis
        self.action = "stay"  # Actions received from player
        self.time = 0  # Seconds since start
        self.total_time = 60  # Total time of the game
        self.player = None
        self.time_board = None  # Time board widget
        # PID of the player loop in order to be able to kill it when the game is over
        self.player_loop_pid = None
        self.models = None
        self.update_scheduled = None    # Clock handle for update()
        self.timer_scheduled = None     # Clock handle for update_clock()
        self.space_subdivisions = 10
        self.frames_per_action = 10
        self.fishderby = None  # Main widget of the game
        self.n_jelly = 0
        if not self.headless:
            Builder.load_file('main.kv')
    # Steps counter is a number that goes from 0 to frames_per_action - 1:
    # the getter wraps the raw counter modulo frames_per_action.
    @property
    def cnt_steps(self):
        return self._cnt_steps % self.frames_per_action
    @cnt_steps.setter
    def cnt_steps(self, val):
        # The raw (unwrapped) counter is stored; only reads are wrapped.
        self._cnt_steps = val
def set_player_loop_pid(self, pid):
self.player_loop_pid = pid
def create_player(self):
"""Always 1 player, that is 1 boat"""
self.player = Player()
def check_fish_near(self, boat):
"""
Catch a random fish that is on the same position as the boat if possible
:param boat: Boat. It must not have a caught fish.
:return:
"""
inds = np.random.permutation(len(self.fishes))
keys = list(self.fishes.keys())
for f in inds:
fish = self.fishes[keys[f]]
if fish.position == boat.hook.position and fish.caught is None:
return fish
def new_action(self, msg):
"""
Assign the new action coming from the message
:param msg: dict. Message coming from the receiver.
:return:
"""
self.action = msg["action"]
    def send_state_or_display_stats(self):
        """
        Send msg in order to indicate the player we have updated the game. If game has ended, display the stats screen.

        Returns False when the game is over (and the update loop should
        stop), True otherwise.
        """
        msg = {"game_over": self.game_over}
        if self.game_over:
            # Tell the player process first, then tear down the UI loop.
            self.sender(msg)
            if not self.headless:
                self.display_stats()
            self.update_scheduled.cancel()
            # self.headless_thread.join()
            return False
        self.sender(msg)
        return True
def fishes_next_move(self):
"""
Calculate and store, for every fish, the infinitesimal moving step for the position changing process.
After that, increase each fish's updates counter.
:return:
"""
self.move_x.clear()
self.move_y.clear()
for fish in self.fishes.values():
move_x, move_y = fish.next_movement_and_flip_horizontally()
self.move_x += [move_x / self.frames_per_action]
self.move_y += [move_y / self.frames_per_action]
fish.updates_cnt += 1
    def introduce_diver(self, state2ind):
        """Create the diver (headless or Kivy widget) and attach it to the player."""
        space_subdivisions = self.space_subdivisions
        if self.headless:
            diver = Diver(self.settings.init_pos_diver,
                          space_subdivisions=space_subdivisions,
                          states=state2ind,
                          stoch=self.settings.randomness)
        else:
            diver = GreenDiver(self.settings.init_pos_diver,
                               space_subdivisions=space_subdivisions,
                               source=f"images/scuba.png",
                               states=state2ind,
                               stoch=self.settings.randomness)
            # Only the UI variant is added to the widget tree.
            self.main_widget.ids.diver_layout.add_widget(diver)
        self.player.diver = diver
def init_states(self):
subdivisions = self.space_subdivisions
state2ind = {}
ind2state = {}
state = 0
for i in range(subdivisions):
for j in range(subdivisions):
state2ind[state] = (i, j)
ind2state[(i, j)] = state
state += 1
self.state2ind = state2ind
self.ind2state = ind2state
def init_actions(self):
self.actions = {
"left": (-1, 0),
"right": (1, 0),
"down": (0, -1),
"up": (0, 1),
"stay": (0, 0)
}
self.actions2ind = {
"left": 0,
"right": 1,
"down": 2,
"up": 3,
"stay": 4
}
    def receive_action_from_player(self):
        """Read one message from the player: either a single action (while
        exploring) or a full policy (when exploration has finished)."""
        msg = self.receiver()
        self.exploration = msg["exploration"]
        if self.exploration:
            self.new_action(msg)
        else:
            # Exploration just ended: switch to policy execution mode.
            self.policy = msg["policy"]
            if not self.headless:
                self.try_bind_diver_position()
                self.init_clock()
    def update_headless(self, dt):
        """Per-frame tick for headless mode: simulate during exploration,
        else execute the learned policy one step per action window."""
        if self.headless:
            if self.exploration:
                self.receive_action_from_player()
            if self.exploration:
                self.modify_action(noise=self.settings.randomness)
                self.act_simulation(self.action)
            else:
                # Pick a new action only at the start of an action window.
                if self.cnt_steps == 0:
                    current_state = self.player.diver.position.x, self.player.diver.position.y
                    self.action = self.policy[current_state]
                    self.modify_action(noise=self.settings.randomness)
                # NOTE(review): this condition is always True (either the
                # wrapped counter is 0 or it isn't) — confirm intent.
                if self._cnt_steps == 0 or self.cnt_steps != 0:
                    self.act(self.action)
                self._cnt_steps += 1
                if self.cnt_steps == 0:
                    self.check_king_fish_caught()
                    self.check_jellyfish_touched()
                if self.time >= self.total_time:
                    self.game_over = True
                if self._cnt_steps >= self.episode_len:
                    self.game_over = True
                self.send_state_or_display_stats()
                self.time += 1.0 / self.settings.frames_per_second
    def update(self, dt):
        """Per-frame tick for UI mode; mirrors update_headless but counts
        episode length in frames (episode_len * frames_per_action)."""
        if not self.headless:
            if self.exploration:
                self.receive_action_from_player()
            if self.exploration:
                self.modify_action(noise=self.settings.randomness)
                self.act_simulation(self.action)
            else:
                # Pick a new action only at the start of an action window.
                if self.cnt_steps == 0:
                    current_state = self.player.diver.position.x, self.player.diver.position.y
                    self.action = self.policy[current_state]
                    self.modify_action(noise=self.settings.randomness)
                # NOTE(review): this condition is always True — confirm intent.
                if self._cnt_steps == 0 or self.cnt_steps != 0:
                    self.act(self.action)
                self._cnt_steps += 1
                if self.cnt_steps == 0:
                    self.check_king_fish_caught()
                    self.check_jellyfish_touched()
                if self.time >= self.total_time:
                    self.game_over = True
                if self._cnt_steps >= self.episode_len * self.frames_per_action:
                    self.game_over = True
                self.send_state_or_display_stats()
    def init_jellyfishes(self):
        """Place the jellyfish at the configured coordinates; reward index
        n+1 of settings.rewards belongs to the n-th jelly."""
        pos_x = self.settings.jelly_x
        pos_y = self.settings.jelly_y
        for n, (x, y) in enumerate(zip(pos_x, pos_y)):
            if not self.headless:
                jelly = JellySmile(position=(x, y),
                                   space_subdivisions=self.space_subdivisions,
                                   score=self.settings.rewards[1 + n])
                self.main_widget.ids.jelly_layout.add_widget(jelly)
            else:
                jelly = JellySmileHeadless(
                    position=(x, y),
                    space_subdivisions=self.space_subdivisions,
                    score=self.settings.rewards[1 + n])
            self.jellys["jelly" + str(n)] = jelly
    def init_king_fish(self):
        """Create the king fish at its configured position; its score is
        rewards[0]."""
        posx, posy = self.settings.pos_king
        score = self.settings.rewards[0]
        name = "king_fish"
        if self.headless:
            fish = FishHeadless(init_state=(posx, posy),
                                type_fish="bowie",
                                name=name,
                                settings=self.settings,
                                score=score)
        else:
            fish = Fish(init_state=(posx, posy),
                        type_fish="bowie",
                        name=name,
                        settings=self.settings,
                        score=score)
            # Only the UI variant is added to the widget tree.
            self.main_widget.ids.fish_layout.add_widget(fish)
        self.king_fish = fish
def check_jellyfish_touched(self):
diver = self.player.diver
for key in self.jellys.keys():
if self.jellys[key].position == diver.position:
self.player.score += self.jellys[key].score
self.jellys[key].touched = True
if not self.headless:
self.jellys[key].source = 'images/jelly_hurt.png'
    def check_king_fish_caught(self):
        """
        For every boat in the game, do one of:
        1) if no fish is caught by it, check whether any can be caught
        2) if a fish has been caught and the player is at the surface, finish pulling the rod
        :return:
        """
        diver = self.player.diver
        if diver.has_fish is None:
            self.check_king_fish_near(diver)
            if self.king_fish.caught:
                # Catching the king fish ends the game with a +100 bonus.
                self.king_fish_caught = True
                self.player.score += 100
                self.game_over = True
        elif diver.has_fish is not None:
            self.king_fish_caught = True
            self.player.score += 100
            self.game_over = True
def check_king_fish_near(self, diver):
"""
Catch a random fish that is on the same position as the boat if possible
:param diver: Diver. It must not have a caught fish.
:return:
"""
if self.king_fish.position == diver.position:
self.king_fish.caught = True
    def act_simulation(self, action):
        """
        Function that simulates the reward given an action and a state
        without the need of displaying it
        :param action:
        :return:
        """
        reward, final_state = self.step(action)
        self.episode_len_count += 1
        if final_state or self.episode_len_count >= self.episode_len:
            self.send_state(reward, end_episode=True)
            # print("I caught fish {}".format(self.n_jelly))
            # Reset diver and per-episode counters for the next episode.
            x, y = self.settings.init_pos_diver
            self.player.diver.position.set_x(x)
            self.player.diver.position.set_y(y)
            self.episode_len_count = 0
            self.n_jelly = 0
        else:
            self.send_state(reward, end_episode=False)
        return None
def next_state(self, state, action):
action_tuple = self.actions[action]
next_state = (state[0] + action_tuple[0], state[1] + action_tuple[1])
return next_state
    def step(self, action):
        """Apply one action: illegal moves cost -100 and leave the diver in
        place; legal moves update the position and return its reward.

        NOTE(review): the action index is taken from self.action, not the
        *action* parameter — confirm callers always pass self.action.
        """
        ind_action = self.actions2ind[self.action]
        current_state = self.player.diver.position.x, self.player.diver.position.y
        ind_state = self.ind2state[current_state]
        if not self.player.diver.model[ind_state, ind_action]:
            # Move not allowed from this cell (wall/boundary).
            reward = -100
            is_final_state = False
            return reward, is_final_state
        else:
            next_state_x, next_state_y = self.next_state(current_state, action)
            next_state = next_state_x, next_state_y
            self.player.diver.position.set_x(next_state_x)
            self.player.diver.position.set_y(next_state_y)
            reward, is_final_state = self.compute_reward(next_state)
            return reward, is_final_state
    def compute_reward(self, next_state):
        """Return (reward, is_final_state) for landing on *next_state*:
        jelly cells pay rewards[jelly+1], the king fish cell pays its
        score and ends the episode, anything else pays rewards[-1]."""
        next_state_x, next_state_y = next_state
        reward = self.settings.rewards[-1]  # changed from 0....
        is_final_state = False
        for jelly in range(len(self.jellys)):
            if next_state_x == self.settings.jelly_x[jelly] and next_state_y == self.settings.jelly_y[jelly]:
                reward = self.settings.rewards[jelly + 1]
                self.n_jelly += 1
                break
        if next_state == tuple(self.settings.pos_king):
            reward = self.king_fish.score
            is_final_state = True
        return reward, is_final_state
def send_state(self, reward, end_episode=False):
"""
Send msg in order to indicate the player we have updated the game. If game has ended, display the stats screen.
"""
msg = {
"game_over": self.game_over,
"state":
(self.player.diver.position.x, self.player.diver.position.y),
"reward": reward,
"end_episode": end_episode
}
self.sender(msg)
return True
def modify_action(self, noise=1):
if noise:
self.noisy_action()
else:
self.check_boundaries()
def check_boundaries(self):
current_state = self.player.diver.position.x, self.player.diver.position.y
state = self.ind2state[current_state]
action = self.actions2ind[self.action]
if not self.player.diver.model[state, action]:
self.action = "stay"
    def noisy_action(self):
        """Resample the pending action from the diver's transition matrix
        row for (current state, intended action)."""
        current_state = self.player.diver.position.x, self.player.diver.position.y
        s = self.ind2state[current_state]
        action_ind = self.actions2ind[self.action]
        # Probability distribution over the 5 actions given the intent.
        p = self.player.diver.transition_matrix[s, action_ind]
        ind_action = np.random.choice(np.arange(0, 5), p=p)
        noisy_action = list(self.actions2ind.keys())[ind_action]
        self.action = noisy_action
def act(self, action):
    """Move the diver one step in the requested direction.

    :param action: one of 'left', 'right', 'up', 'down' or 'stay'
    :return: None; the diver's position is mutated in place.
    """
    diver = self.player.diver
    # Per-frame step size so a full action spans frames_per_action frames.
    hook_speed = 1.0 / self.frames_per_action
    if action == 'left':
        self.move_diver_x(diver, -hook_speed)
    elif action == 'right':
        self.move_diver_x(diver, hook_speed)
    elif action == 'down':
        self.move_diver_y(diver, -hook_speed)
    elif action == 'up':
        self.move_diver_y(diver, hook_speed)
    # 'stay' (or any other value) leaves the diver where it is.
@staticmethod
def move_diver_x(diver, speed):
    # Shift the diver horizontally by *speed*; act() passes a negative
    # value for 'left' and a positive one for 'right'.
    diver.position.increase_x(speed)
@staticmethod
def move_diver_y(diver, speed):
    # Shift the diver vertically by *speed*; act() passes a negative
    # value for 'down' and a positive one for 'up'.
    diver.position.increase_y(speed)
# Methods dependent of Kivy
def init_clock(self):
    """
    Initialize the timer
    :return:
    """
    # Total seconds the game may run, taken from the settings.
    self.total_time = self.settings.game_time
    self.time_board = TimeBoard(seconds=int(self.total_time))
    # Center the countdown widget horizontally.
    self.time_board.pos_hint['center_x'] = 0.5
    if not self.headless:
        self.main_widget.add_widget(self.time_board)
    # Tick update_clock once per second via the Kivy scheduler.
    self.timer_scheduled = Clock.schedule_interval(self.update_clock, 1.0)
def build(self):
    """Initialize the Kivy screen.

    Creates the main FishingDerby widget, sets up the player, fish,
    jellyfish, state/action tables, and schedules the frame update loop.
    Returns the root widget for Kivy to draw.
    """
    # Set sky color
    from kivy.core.window import Window
    Window.clearcolor = 63 / 255, 191 / 255, 191 / 255, 0.3
    # Create main widget
    self.main_widget = FishingDerby(fishes=self.fishes,
                                    player=self.player,
                                    settings=self.settings)
    self.create_player()
    self.init_king_fish()
    self.init_jellyfishes()
    self.init_states()
    self.introduce_diver(self.state2ind)
    self.init_actions()
    self.episode_len = self.settings.episode_len
    # Drive the game loop at the configured frame rate.
    self.update_scheduled = Clock.schedule_interval(
        self.update, 1.0 / self.settings.frames_per_second)
    if not self.settings.visualize_exploration:
        # temporarily unbind the diver's position from drawing it for exploration
        self.player.diver.position.unbind(pos_x=self.player.diver.on_state)
        self.player.diver.position.unbind(pos_y=self.player.diver.on_state)
    # import threading
    # self.headless_thread = threading.Thread(target=self.headless_mode)
    # self.headless_thread.start()
    # Kivy receives main widget and draws it
    return self.main_widget
def headless_mode(self):
    """Run the game without a window.

    Performs the same setup as build(), then steps update_headless()
    until game_over is set, and returns the pass/fail result from
    check_sequence_and_kill_player_control(). Returns None when
    ``self.headless`` is False.
    """
    if self.headless:
        self.create_player()
        self.init_king_fish()
        self.init_jellyfishes()
        self.init_states()
        self.introduce_diver(self.state2ind)
        self.init_actions()
        self.episode_len = self.settings.episode_len
        self.time = 0  # Seconds since start
        # Total time of the game
        self.total_time = self.settings.game_time
        while True:
            self.update_headless(1)
            if self.game_over:
                break
        res = self.check_sequence_and_kill_player_control()
        return res
def update_clock(self, dl):
    """
    Increase the clock by 1 second. If the remaining time is 0, the game is over.
    :param dl: delta-time. Not used.
    :return:
    """
    if self.time_board.seconds == 0:
        self.game_over = True
    else:
        self.time_board.seconds -= 1
        self.time += 1.0
    # NOTE(review): the original file's indentation was mangled; this assumes
    # the score is reprinted on every tick, not only while time remains — confirm.
    self.print_score()
def display_stats(self):
    """Append this run's statistics to the on-disk score history and show
    the stats screen.

    The history file (``~/.fishing_derby_scores``) holds a single JSON
    object mapping a timestamp to each run's stats dict.
    """
    scores_file = join(home, ".fishing_derby_scores")
    stats = Stats(self.player, self.settings, self.king_fish)
    # BUG FIX: the file was previously opened with mode "a" (write-only),
    # so json.load() always raised UnsupportedOperation, the existing
    # history was discarded, and json.dump() appended a second JSON
    # document — corrupting the file. "a+" creates the file if missing
    # and allows reading it back.
    with open(scores_file, "a+") as f:
        f.seek(0)
        try:
            stats_file = json.load(f)
        except (UnsupportedOperation, ValueError):
            # Empty or unparsable history: start a fresh score table.
            # (json.JSONDecodeError is a subclass of ValueError.)
            stats_file = dict()
        stats_dict = stats.get_stats()
        stats_file[datetime.now().timestamp()] = stats_dict
        # Rewrite the whole file so it stays one valid JSON document.
        f.seek(0)
        f.truncate()
        json.dump(stats_file, f)
    stats.load(stats_dict)
    stats.open()
def try_bind_diver_position(self):
    # Inverse of the unbind done in build(): restore the drawing callbacks
    # once exploration is over.
    if not self.settings.visualize_exploration:
        # rebind the diver's position for drawing it during simulation
        self.player.diver.position.bind(pos_x=self.player.diver.on_state)
        self.player.diver.position.bind(pos_y=self.player.diver.on_state)
# Headless mode methods
def run_headless(self):
    """Initialize the testing environment"""
    # kivy inherited stuff
    if self.headless:
        # No window: run the pure-Python game loop.
        self.headless_mode()
    else:
        # Fall back to the regular Kivy app loop.
        self.run()
# Tester specific methods
def print_tester_results(self):
    """Print one '1'/'0' character per recorded check, in pass-dict order
    ('1' only for an exact boolean True)."""
    flags = ("1" if passed is True else "0" for passed in self.passes.values())
    print("".join(flags))
def check_score_threshold(self, threshold=100):
    """Return True when the player's score has reached *threshold*.

    :param threshold: pass mark; defaults to 100 (the original hard-coded
        value), so existing callers are unaffected.
    """
    return self.player.score >= threshold
def check_sequence_and_kill_player_control(self):
    """Evaluate the run's pass/fail condition (the score threshold)."""
    return self.check_score_threshold()
def reset_scores(self):
    # Zero the player's score before a new run.
    self.player.score = 0
@staticmethod
def set_seed(seed):
    # Seed both RNG sources used here (stdlib random and numpy) so that
    # runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
|
frequency.py | #
# This script verifies the frequency at which the
# sled server returns position information.
#
# Scenarios:
# - Single client
# - Multiple streaming clients start before and end after test
# - Other clients start at random times within the measurement
#
from testclient import SledClient
import time
import threading
class TestFrequency():
    """Measure the rate at which the sled server streams position samples.

    run() connects, streams for a given interval, and returns True only when
    the mean sample frequency is within 990-1010 Hz. (Python 2 code: note the
    print statement in run().)
    """
    client = None
    # Parallel per-sample lists, filled by positionCallback() and reset in run().
    ctime = list()     # client-side timestamps (self.client.time())
    stime = list()     # server-side timestamps delivered with each sample
    position = list()  # first coordinate of each sample (position[0][0])

    def positionCallback(self, position, time):
        # Record one streamed sample: local clock, server clock, position.
        self.ctime.append(self.client.time())
        self.stime.append(time)
        self.position.append(position[0][0])

    def run(self, interval):
        """Stream for *interval* seconds; return True iff the mean rate is ~1 kHz."""
        self.client = SledClient()
        self.client.connect("sled", 3375)
        self.client.startStream()
        self.ctime = list()
        self.stime = list()
        self.position = list()
        # NOTE(review): the callback is installed after startStream(), so the
        # first samples may be dropped — confirm this is intended.
        self.client._positionCallback = self.positionCallback
        time.sleep(interval)
        sum_interval = 0
        num_samples = len(self.stime)
        self.client.close()
        # Accumulate the server-side inter-sample intervals.
        for i in range(len(self.stime) - 1):
            interval = (self.stime[i + 1] - self.stime[i])
            sum_interval += interval
        if sum_interval == 0:
            return False
        avg_frequency = num_samples / sum_interval
        print "Mean interval: {:.2f}ms; Frequency: {:.2f}Hz".format(1000 / avg_frequency, avg_frequency )
        # Specification: mean rate must be 1 kHz +/- 1%.
        if avg_frequency < 990 or avg_frequency > 1010:
            return False
        return True
def simpleClient(start, duration):
    """Connect to the sled server after *start* seconds, stream for
    *duration* seconds, then disconnect."""
    sled = SledClient()
    time.sleep(start)
    sled.connect("sled", 3375)
    sled.startStream()
    time.sleep(duration)
    sled.close()
def spawnClient(start, duration):
    """Run simpleClient(start, duration) on a daemon background thread
    (fire-and-forget; the thread dies with the main program)."""
    # Pass the arguments directly instead of capturing them in a lambda,
    # and set the daemon attribute: setDaemon() is the deprecated spelling.
    thread = threading.Thread(target=simpleClient, args=(start, duration))
    thread.daemon = True
    thread.start()
# --- Driver script (Python 2) ---
test = TestFrequency()
# Run for five seconds
if not test.run(5):
    print "Test run failed, frequency not within specification"
# Spawn clients before measurement and see how far we can go
for nClients in [1, 2, 4, 8, 16, 32, 64, 128, 256]:
    print "Running test with {} concurrent clients".format(nClients)
    for i in range(nClients):
        spawnClient(0, 10)
    # Give the spawned clients time to connect before measuring.
    time.sleep(3)
    if not test.run(5):
        print " - Test failed"
    # Let the 10-second background clients finish before the next round.
    time.sleep(5)
|
__init__.py | # -*- coding: utf-8 -*-
"""
Gestión de datos recogidos en web de forma periódica
@author: Eugenio Panadero
"""
import threading
import time
import numpy as np
__author__ = 'Eugenio Panadero'
__copyright__ = "Copyright 2015, AzogueLabs"
__credits__ = ["Eugenio Panadero"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eugenio Panadero"
def procesa_tareas_paralelo(lista_tareas, dict_data, func_process,
                            titulo=None, usar_multithread=True, max_threads=100, verbose=True):
    """
    Process the given tasks in parallel, capped at `max_threads` concurrent threads.
    Especially useful for issuing simultaneous requests to a single web server.
    :param lista_tareas: list of unique task keys (key_tarea) to process
    :param dict_data: dict of the form '{key_tarea: variable_in_out}'; each worker
                      operates on its own key, reading its input from the dict and
                      depositing its output in the same slot.
    :param func_process: worker function with signature
                         'func_process(key_tarea, dict_data_in_out)'
    :param titulo: optional title format string printed with the task count
                   (must contain a %-placeholder)
    :param usar_multithread: run tasks on threads (default True)
    :param max_threads: maximum number of concurrent threads (default 100)
    :param verbose: print per-batch timing information (default True)
    """
    num_tareas = len(lista_tareas)
    if titulo and num_tareas > 1:
        print(titulo % num_tareas)
    if num_tareas > 1 and usar_multithread:
        tic_init = time.time()
        threads = [threading.Thread(target=func_process, args=(tarea, dict_data,))
                   for tarea in lista_tareas]
        # Chunk the threads into batches of at most max_threads
        # (plain range instead of np.arange — integer chunking needs no numpy).
        batch_size = int(max_threads)
        lista_threads = [threads[i:i + batch_size] for i in range(0, len(threads), batch_size)]
        cont_tareas = 0
        for th in lista_threads:
            tic = time.time()
            # Plain loops instead of side-effect list comprehensions.
            for thread in th:
                thread.start()
            for thread in th:
                thread.join()
            if verbose:
                print("Procesado de tareas en paralelo [%lu->%lu, %%=%.1f]: %.2f seg [%.4f seg/tarea]"
                      % (cont_tareas + 1, cont_tareas + len(th), 100. * (cont_tareas + len(th)) / float(num_tareas),
                         (time.time() - tic), (time.time() - tic) / len(th)))
            cont_tareas += len(th)
        tic_fin = (time.time() - tic_init)
        # (num_tareas > 1 and usar_multithread already hold in this branch.)
        if len(lista_threads) > 1 and verbose:
            print("Tiempo de proceso de tareas en paralelo TOTAL ({} tareas): {:.2f} seg [{:.4f} seg/tarea]"
                  .format(num_tareas, tic_fin, tic_fin / num_tareas))
    else:
        # Sequential fallback (single task or multithreading disabled).
        for tarea in lista_tareas:
            if num_tareas > 3 and verbose:
                print('Tarea: %s' % str(tarea))
            func_process(tarea, dict_data)
|
kalmanVideoMultiCFI.py | import time
import cv2
import math
import numpy as np
import multiprocessing
import matplotlib.pyplot as plt
from Modules.foregroundExtraction import readyFrame, frameDifferencing, morphologicalOperations, natural_sort, readySingleFrame
from Modules.ballDetection import findContours, sizeDetection, playerProximityDetection, regionDetection, courtBoundaryDetection
# Ball detection + Kalman tracking over a tennis video, with per-stage
# profiling and trajectory plots. Flat script: configuration, video and
# Kalman set-up, then a per-frame loop of foreground extraction ->
# candidate detection -> Kalman tracking.
# NOTE(review): indentation was reconstructed from a whitespace-mangled
# source; the nesting below is the most plausible reading — confirm.

# Initializing
datasetName = "Dataset2"
if (datasetName == "Dataset1"):
    startFrameDataset = 65
    endFrameDataset = 560
elif (datasetName == "Dataset2"):
    startFrameDataset = 35
    endFrameDataset = 215
# Frame-number -> tracked x / y coordinate, for the trajectory plots.
dictFrameNumberscX = {}
dictFrameNumberscY = {}
ballCandidatesPreviousFrame = list()
#Profiling Structures
trackingTime = list()
detectionTime = list()
feTime = list()
processTime = list()
#Reading frames
startTimeReadingFrames = time.time()
# Creating Video Object
cap = cv2.VideoCapture('DatasetVideos/'+datasetName+'.mp4')
cap.set(cv2.CAP_PROP_POS_FRAMES, startFrameDataset)
endTimeReadingFrames = time.time()
print("Reading Frames--- %s seconds ---" %
      (endTimeReadingFrames - startTimeReadingFrames))
#Kalman Initialization
startKalmanInitTime = time.time()
# NOTE(review): np.array((2, 1)) builds the 1-D array [2, 1], not a 2x1
# zero vector — presumably np.zeros was intended; mp is overwritten with a
# proper 2x1 measurement before first use, so this is likely benign.
mp = np.array((2, 1), np.float32)  # measurement
tp = np.zeros((2, 1), np.float32)  # tracked / prediction
# Constant-velocity model: state [x, y, vx, vy], measurement [x, y].
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array(
    [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.array(
    [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.009
kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00003
endKalmanInitTime = time.time()
i = 0  #Keeping track of the frame number
while (cap.isOpened()):
    print("######Start of Frame#####")
    if(i == 0):  # If first frame read 3 frames
        ret1, previousFrame = cap.read()
        ret2, currFrame = cap.read()
        ret3, nextFrame = cap.read()
    else:  # Read just the next frame from the 2nd frame onwards
        previousFrame = currFrame
        currFrame = nextFrame
        ret, nextFrame = cap.read()
    print("Frame Number {}".format(i + 1))
    #
    #
    # FOREGROUND EXTRACTION
    #
    #
    startTimeForeGroundExtraction = time.time()
    # Readying the frames
    # previousFrameGray, currFrameGray, nextFrameGray = readyFrame(
    #     previousFrame, currFrame, nextFrame)
    frameList = [previousFrame, currFrame, nextFrame]
    # NOTE(review): a new Pool is created on every frame and never closed —
    # this leaks worker processes; consider one Pool outside the loop.
    p = multiprocessing.Pool()
    readyFrameList = p.map(readySingleFrame, frameList)
    previousFrameGray = readyFrameList[0]
    currFrameGray = readyFrameList[1]
    nextFrameGray = readyFrameList[2]
    # p1 = multiprocessing.Process(target=readySingleFrame, args=(previousFrame, ))
    # p2 = multiprocessing.Process(target=readySingleFrame, args=(currFrame,))
    # p3 = multiprocessing.Process(target=readySingleFrame, args=(nextFrame,))
    # Performing frame differencing
    threshFrameDifferencing = frameDifferencing(
        previousFrameGray, currFrameGray, nextFrameGray)
    # Performing morphological operations
    final_image = morphologicalOperations(threshFrameDifferencing, 4, 4)
    # final_image = cv2.medianBlur(final_image, 7)
    # cv2.imshow('final image', final_image)
    endTimeForegroundExtraction = time.time()
    print("Foreground Extraction--- %s seconds ---" %
          (endTimeForegroundExtraction - startTimeForeGroundExtraction))
    feTime.append(endTimeForegroundExtraction - startTimeForeGroundExtraction)  #Profiling
    #
    #
    # BALL DETECTION
    #
    #
    startTimeBallDetection = time.time()
    # Finding contours in the frame
    contours, hier = findContours(final_image)
    # Separating candidates based on size
    ballCandidates, playerCadidates, incompletePlayerCandidates = sizeDetection(
        contours, currFrame, i)
    # Removing candidates outside the Court Boundary in Dataset2
    if (datasetName == 'Dataset2'):
        ballCandidates, playerCadidates, incompletePlayerCandidates = courtBoundaryDetection(
            ballCandidates, playerCadidates, incompletePlayerCandidates, currFrame)
    # Removing Candidates that are close to the Players
    ballCandidatesFiltered = playerProximityDetection(
        ballCandidates, playerCadidates, incompletePlayerCandidates, currFrame)
    # Removing candidates that are not in their expected region after motion
    ballCandidatesFilteredProximity, ballCandidatesPreviousFrame = regionDetection(
        ballCandidatesFiltered, ballCandidatesPreviousFrame, currFrame)
    endTimeBallDetection = time.time()
    print("Ball Detection--- %s seconds ---" %
          (endTimeBallDetection - startTimeBallDetection))
    detectionTime.append(endTimeBallDetection - startTimeBallDetection)  #Profiling
    #
    #
    # BALL TRACKING
    #
    #
    startKalmanPredTime = time.time()
    # Calculating the centre of the image frame for initstate
    height, width, channels = currFrame.shape
    imageCenter = [width/2, height/2]
    # First Frame
    if (i + 1 == 1):
        # If no candidate detected, use image centre as initial state
        if not ballCandidatesFilteredProximity:
            initstate = imageCenter
        # If Candidates detected
        else:
            # If a single candidate detected, use it for the initial state
            if (len(ballCandidatesFilteredProximity) == 1):
                x = ballCandidatesFilteredProximity[0][0]
                y = ballCandidatesFilteredProximity[0][1]
                mp = np.array([[np.float32(x)], [np.float32(y)]])
                initstate = [mp[0], mp[1]]
            # If multiple candidates, calculate candidate closest to the image centre for initial state
            else:
                minDistInitCand = 10000
                for cand in ballCandidatesFilteredProximity:
                    distCenter = math.sqrt(math.pow(
                        (cand[0] - imageCenter[0]), 2) + math.pow((cand[1] - imageCenter[1]), 2))
                    if (distCenter < minDistInitCand):
                        initstate = [cand[0], cand[1]]
                        minDistInitCand = distCenter
        # Using Initstate for First Prediction
        tp[0] = initstate[0]
        tp[1] = initstate[1]
        cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)
        dictFrameNumberscX[i + 1] = tp[0]
        dictFrameNumberscY[i + 1] = tp[1]
        if(__debug__):
            cv2.imshow('Candidate image', currFrame)
    # If not the first frame
    else:
        # Do Prediction (filter state is kept relative to initstate).
        tp = kalman.predict()
        tp[0] = tp[0] + initstate[0]
        tp[1] = tp[1] + initstate[1]
        # If one candidate, measure and correct
        if (len(ballCandidatesFilteredProximity) == 1):
            for cand in ballCandidatesFilteredProximity:
                # distncePredAct = math.sqrt(
                #     math.pow((cand[0] - tp[0]), 2) + math.pow((cand[1] - tp[1]), 2))
                x = cand[0]
                y = cand[1]
                x = x - initstate[0]
                y = y - initstate[1]
                mp = np.array([[np.float32(x)], [np.float32(y)]])
                corrected = kalman.correct(mp)
                corrected[0] = corrected[0] + initstate[0]
                corrected[1] = corrected[1] + initstate[1]
                cv2.circle(
                    currFrame, (corrected[0], corrected[1]), 10, (0, 255, 0), -1)
                dictFrameNumberscX[i + 1] = corrected[0]
                dictFrameNumberscY[i + 1] = corrected[1]
                # cv2.circle(currFrame, (tp[0], tp[1]),
                #            10, (0, 0, 255), -1)  # pred
                # #drawing a line
                # cv2.line(currFrame, (int(cand[0]), int(cand[1])), (int(
                #     tp[0]), int(tp[1])), (255, 0, 0), 2)
                # xmidPointPlayer = (cand[0]+tp[0])*0.5
                # ymidPointPlayer = (cand[1]+tp[1])*0.5
                # cv2.putText(currFrame, str(round(distncePredAct,2)), (int(xmidPointPlayer), int(
                #     ymidPointPlayer)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                # print("Distance predact {}".format(distncePredAct))
                cv2.drawContours(currFrame, [cand[3]], -1, (255, 0,), 2)
                cv2.putText(currFrame, str(
                    cand[2]), (cand[0] + 1, cand[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            if(__debug__):
                cv2.imshow('Candidate image', currFrame)
        # If multiple candidates,
        elif(len(ballCandidatesFilteredProximity) > 1):
            minDistObject = 1000
            minDistXcoord = 0
            minDistYcoord = 0
            # Calculate candidate closest to the prediction
            for cand in ballCandidatesFilteredProximity:
                distncePredAct = math.sqrt(
                    math.pow((cand[0] - tp[0]), 2) + math.pow((cand[1] - tp[1]), 2))
                # #drawing a line
                # cv2.line(currFrame, (int(cand[0]), int(cand[1])), (int(
                #     tp[0]), int(tp[1])), (255, 0, 0), 2)
                # xmidPointPlayer = (cand[0]+tp[0])*0.5
                # ymidPointPlayer = (cand[1]+tp[1])*0.5
                # cv2.putText(currFrame, str(round(distncePredAct,2)), (int(xmidPointPlayer), int(
                #     ymidPointPlayer)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                # print("Distance predact {}".format(distncePredAct))
                if (distncePredAct < 50):
                    if (distncePredAct < minDistObject):
                        minDistObject = distncePredAct
                        minDistXcoord = cand[0]
                        minDistYcoord = cand[1]
            # If no candidate is close to the prediction, predict only
            if (minDistObject == 1000):
                cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)
                dictFrameNumberscX[i + 1] = tp[0]
                dictFrameNumberscY[i + 1] = tp[1]
            # If a candidate close to the prediction, use it for measurement and correction
            else:
                x = minDistXcoord
                y = minDistYcoord
                x = x - initstate[0]
                y = y - initstate[1]
                mp = np.array([[np.float32(x)], [np.float32(y)]])
                corrected = kalman.correct(mp)
                corrected[0] = corrected[0] + initstate[0]
                corrected[1] = corrected[1] + initstate[1]
                cv2.circle(
                    currFrame, (corrected[0], corrected[1]), 10, (0, 255, 0), -1)
                dictFrameNumberscX[i + 1] = corrected[0]
                dictFrameNumberscY[i+1] = corrected[1]
                # NOTE(review): 'cand' here is the leftover variable from the
                # loop above (the LAST candidate), not necessarily the selected
                # closest one — presumably a latent bug; confirm.
                cv2.drawContours(currFrame, [cand[3]], -1, (255, 0,), 2)
                cv2.putText(currFrame, str(
                    cand[2]), (cand[0] + 1, cand[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            if(__debug__):
                cv2.imshow('Candidate image', currFrame)
        # If no candidate detected, predict only
        else:
            cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)
            dictFrameNumberscX[i + 1] = tp[0]
            dictFrameNumberscY[i+1] = tp[1]
            if(__debug__):
                cv2.imshow('Candidate image', currFrame)
    endKalmanPredTime = time.time()
    trackingTime.append((endKalmanPredTime -
                         startKalmanPredTime)+(endKalmanInitTime-startKalmanInitTime))
    print("Ball Tracking in --- %s seconds ---" % ((endKalmanPredTime -
                                                    startKalmanPredTime)+(endKalmanInitTime-startKalmanInitTime)))
    processTime.append((endTimeForegroundExtraction - startTimeForeGroundExtraction)+(endTimeBallDetection - startTimeBallDetection)+((endKalmanPredTime -
                       startKalmanPredTime)+(endKalmanInitTime-startKalmanInitTime)))  #Profiling
    # Print Ball Trajectory 2D Feature Image
    if (((i + 1) % endFrameDataset) == 0):
        # End of dataset: dump profiling averages and plot both trajectories.
        print("Average FE Time: {}".format(
            sum(feTime)/(endFrameDataset-startFrameDataset)))
        print("Average Detection Time: {}".format(
            sum(detectionTime)/(endFrameDataset-startFrameDataset)))
        print("Average Tracking Time: {}".format(
            sum(trackingTime) / (endFrameDataset - startFrameDataset)))
        print("Average Total Process Time: {}".format(
            sum(processTime) / (endFrameDataset - startFrameDataset)))
        keys = list(dictFrameNumberscX.keys())
        xvalues = list(dictFrameNumberscX.values())
        yvalues = list(dictFrameNumberscY.values())
        plt.xlabel('Frame Number')
        plt.ylabel('Candidate Kalman X-Coordinate')
        plt.title('CFI with Kalman X Prediction')
        plt.plot(keys, xvalues, 'r--', linewidth=2)
        # plt.axis([-20, 600, 0, 1300])
        # plt.axis([-20,210,100,1200])
        plt.show()
        plt.xlabel('Frame Number')
        plt.ylabel('Candidate Kalman Y-Coordinate')
        plt.title('CFI with Kalman Y Prediction')
        plt.plot(keys, yvalues, 'g--', linewidth=2)
        # plt.axis([-20, 600, 25, 1000])
        # plt.axis([-20,210,50,900])
        plt.show()
        # scatter plot
        # print(dictFrameNumberscY)
        # for data_dict in dictFrameNumberscX.items():
        #     print(data_dict)
        #     x = data_dict[0]
        #     values = data_dict[1]
        #     for value in values:
        #         # plt.subplot(1, 2, 1)
        #         plt.scatter(x,value)
        # plt.xlabel('Frame Number')
        # plt.ylabel('Candidate X-Coordinate')
        # plt.title("Candidate Feature Image X-coordinate")
        # dictFrameNumberscX.clear()
        # plt.show()
        # plt.xlabel('Frame Number')
        # plt.ylabel('Candidate Kalman Y-Coordinate')
        # plt.title('CFI with Kalman Y Prediction')
        # plt.plot(keys, yvalues, 'g--', linewidth=2)
        # plt.show()
    print("######End of Frame#####")
    i += 1  # increments the loop
    # Exits the loop when Esc is pressed, goes to previous frame when space pressed and goes to next frame when any other key is pressed
    if(__debug__):
        k = cv2.waitKey(0)
        if k == 27:
            break
        elif k == 32:
            i -= 2
        else:
            continue
|
test_context.py | import logging
import mock
import threading
import pytest
from ddtrace.span import Span
from ddtrace.context import Context
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.ext.priority import USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP
from .test_tracer import get_dummy_tracer
from tests import BaseTestCase
@pytest.fixture
def tracer_with_debug_logging():
    # All the tracers, dummy or not, shares the same logging object.
    # Temporarily raise the shared logger to DEBUG for the test and
    # restore the previous level afterwards.
    tracer = get_dummy_tracer()
    level = tracer.log.level
    tracer.log.setLevel(logging.DEBUG)
    try:
        yield tracer
    finally:
        tracer.log.setLevel(level)
@mock.patch("logging.Logger.debug")
def test_log_unfinished_spans(log, tracer_with_debug_logging):
    # when the root parent is finished, notify if there are spans still pending
    tracer = tracer_with_debug_logging
    ctx = Context()
    # manually create a root-child trace
    root = Span(tracer=tracer, name="root")
    child_1 = Span(tracer=tracer, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
    child_2 = Span(tracer=tracer, name="child_2", trace_id=root.trace_id, parent_id=root.span_id)
    child_1._parent = root
    child_2._parent = root
    ctx.add_span(root)
    ctx.add_span(child_1)
    ctx.add_span(child_2)
    # close only the parent
    root.finish()
    # The last three Logger.debug calls are expected to be the unfinished-span
    # count followed by one line per unfinished child; pull the lazy
    # %-interpolation arguments out of the mock's recorded calls.
    unfinished_spans_log = log.call_args_list[-3][0][2]
    child_1_log = log.call_args_list[-2][0][1]
    child_2_log = log.call_args_list[-1][0][1]
    assert 2 == unfinished_spans_log
    assert "name child_1" in child_1_log
    assert "name child_2" in child_2_log
    assert "duration 0.000000s" in child_1_log
    assert "duration 0.000000s" in child_2_log
class TestTracingContext(BaseTestCase):
    """
    Tests related to the ``Context`` class that hosts the trace for the
    current execution flow.
    """

    def test_add_span(self):
        # it should add multiple spans
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        assert 1 == len(ctx._trace)
        assert "fake_span" == ctx._trace[0].name
        # adding a span also attaches the context to it
        assert ctx == span.context

    def test_context_sampled(self):
        # a context is sampled if the spans are sampled
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        span.finished = True
        trace, sampled = ctx.close_span(span)
        assert sampled is True
        # no priority was ever assigned
        assert ctx.sampling_priority is None

    def test_context_priority(self):
        # a context is sampled if the spans are sampled
        ctx = Context()
        for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]:
            ctx.sampling_priority = priority
            span = Span(tracer=None, name=("fake_span_%s" % repr(priority)))
            ctx.add_span(span)
            span.finished = True
            # It's "normal" to have sampled be true even when priority sampling is
            # set to 0 or -1. It would stay false even even with priority set to 2.
            # The only criteria to send (or not) the spans to the agent should be
            # this "sampled" attribute, as it's tightly related to the trace weight.
            assert priority == ctx.sampling_priority
            trace, sampled = ctx.close_span(span)
            assert sampled is True, "priority has no impact on sampled status"

    def test_current_span(self):
        # it should return the current active span
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        assert span == ctx.get_current_span()

    def test_current_root_span_none(self):
        # it should return none when there is no root span
        ctx = Context()
        assert ctx.get_current_root_span() is None

    def test_current_root_span(self):
        # it should return the current active root span
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        assert span == ctx.get_current_root_span()

    def test_close_span(self):
        # it should keep track of closed spans, moving
        # the current active to its parent
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        ctx.close_span(span)
        assert ctx.get_current_span() is None

    def test_get_trace(self):
        # it should return the internal trace structure
        # if the context is finished
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        span.finished = True
        trace, sampled = ctx.close_span(span)
        assert [span] == trace
        assert sampled is True
        # the context should be empty
        assert 0 == len(ctx._trace)
        assert ctx._current_span is None

    @mock.patch("ddtrace.internal.hostname.get_hostname")
    def test_get_report_hostname_enabled(self, get_hostname):
        get_hostname.return_value = "test-hostname"
        with self.override_global_config(dict(report_hostname=True)):
            # Create a context and add a span and finish it
            ctx = Context()
            span = Span(tracer=None, name="fake_span")
            ctx.add_span(span)
            span.finish()
            # with reporting enabled, finishing the span tags the hostname
            assert span.get_tag(HOSTNAME_KEY) == "test-hostname"

    @mock.patch("ddtrace.internal.hostname.get_hostname")
    def test_get_report_hostname_disabled(self, get_hostname):
        get_hostname.return_value = "test-hostname"
        with self.override_global_config(dict(report_hostname=False)):
            # Create a context and add a span and finish it
            ctx = Context()
            span = Span(tracer=None, name="fake_span")
            ctx.add_span(span)
            span.finished = True
            # Assert that we have not added the tag to the span yet
            assert span.get_tag(HOSTNAME_KEY) is None
            # Assert that retrieving the trace does not set the tag
            trace, _ = ctx.close_span(span)
            assert trace[0].get_tag(HOSTNAME_KEY) is None
            assert span.get_tag(HOSTNAME_KEY) is None

    @mock.patch("ddtrace.internal.hostname.get_hostname")
    def test_get_report_hostname_default(self, get_hostname):
        get_hostname.return_value = "test-hostname"
        # Create a context and add a span and finish it
        # (no config override: exercises the default behaviour)
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        span.finished = True
        # Assert that we have not added the tag to the span yet
        assert span.get_tag(HOSTNAME_KEY) is None
        # Assert that retrieving the trace does not set the tag
        trace, _ = ctx.close_span(span)
        assert trace[0].get_tag(HOSTNAME_KEY) is None
        assert span.get_tag(HOSTNAME_KEY) is None

    def test_finished(self):
        # a Context is finished if all spans inside are finished
        ctx = Context()
        span = Span(tracer=None, name="fake_span")
        ctx.add_span(span)
        ctx.close_span(span)

    @mock.patch("logging.Logger.debug")
    def test_log_unfinished_spans_disabled(self, log):
        # the trace finished status logging is disabled
        tracer = get_dummy_tracer()
        ctx = Context()
        # manually create a root-child trace
        root = Span(tracer=tracer, name="root")
        child_1 = Span(tracer=tracer, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
        child_2 = Span(tracer=tracer, name="child_2", trace_id=root.trace_id, parent_id=root.span_id)
        child_1._parent = root
        child_2._parent = root
        ctx.add_span(root)
        ctx.add_span(child_1)
        ctx.add_span(child_2)
        # close only the parent
        root.finish()
        # the logger has never been invoked to print unfinished spans
        for call, _ in log.call_args_list:
            msg = call[0]
            assert "the trace has %d unfinished spans" not in msg

    @mock.patch("logging.Logger.debug")
    def test_log_unfinished_spans_when_ok(self, log):
        # if the unfinished spans logging is enabled but the trace is finished, don't log anything
        tracer = get_dummy_tracer()
        ctx = Context()
        # manually create a root-child trace
        root = Span(tracer=tracer, name="root")
        child = Span(tracer=tracer, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
        child._parent = root
        ctx.add_span(root)
        ctx.add_span(child)
        # close the trace
        child.finish()
        root.finish()
        # the logger has never been invoked to print unfinished spans
        for call, _ in log.call_args_list:
            msg = call[0]
            assert "the trace has %d unfinished spans" not in msg

    def test_thread_safe(self):
        # the Context must be thread-safe
        ctx = Context()

        def _fill_ctx():
            span = Span(tracer=None, name="fake_span")
            ctx.add_span(span)

        # hammer the context from 100 threads; no span may be lost
        threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
        for t in threads:
            t.daemon = True
            t.start()
        for t in threads:
            t.join()
        assert 100 == len(ctx._trace)

    def test_clone(self):
        ctx = Context()
        ctx.sampling_priority = 2
        # manually create a root-child trace
        root = Span(tracer=None, name="root")
        child = Span(tracer=None, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
        child._parent = root
        ctx.add_span(root)
        ctx.add_span(child)
        cloned_ctx = ctx.clone()
        # the clone keeps identity/propagation fields but starts with an
        # empty trace buffer
        assert cloned_ctx._parent_trace_id == ctx._parent_trace_id
        assert cloned_ctx._parent_span_id == ctx._parent_span_id
        assert cloned_ctx._sampling_priority == ctx._sampling_priority
        assert cloned_ctx._dd_origin == ctx._dd_origin
        assert cloned_ctx._current_span == ctx._current_span
        assert cloned_ctx._trace == []
|
baidu.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from mysql_connect import MysqlTaskConfig
from skywalking import agent, config
from skywalking.decorators import runnable
from spider import spider
if __name__ == '__main__':
    # Configure and start the SkyWalking agent before creating the app so
    # the Flask/requests/Kafka calls below are instrumented.
    config.service_name = 'baidu.com'
    config.logging_level = 'DEBUG'
    config.flask_collect_http_params = True
    config.collector_address = "172.17.2.64:11800"
    agent.start()
    from flask import Flask, jsonify
    app = Flask(__name__)

    @app.route("/baidu", methods=["POST", "GET"])
    def application():
        # Attach a correlation value to the active trace context.
        from skywalking.trace.context import get_context
        get_context().put_correlation("correlation", "correlation")
        # @runnable(op="/test")
        # def post():
        #     requests.post("http://127.0.0.1:9092/users")
        #
        # from threading import Thread
        # t = Thread(target=post)
        # t.start()
        #
        # res = requests.post("http://127.0.0.1:9092/users")
        #
        # t.join()
        # Exercise several instrumented clients: MySQL, HTTP, Kafka.
        mysqldb = MysqlTaskConfig().get_instance()
        spider(65, 66, mysqldb, "baidu")
        requests.get("http://127.0.0.1:9081/douban1")
        from kafka import KafkaProducer
        producer = KafkaProducer(bootstrap_servers=['127.0.0.1:9092'], api_version=(1, 0, 1))
        producer.send('skywalking', b'baidu')
        return jsonify({"status":"okokokok"})

    @app.route("/baidu1", methods=["POST", "GET"])
    def application1():
        # Plain endpoint with no downstream calls (baseline span).
        return jsonify({"status":"okokokok"})

    PORT = 9080
    app.run(host='0.0.0.0', port=PORT, debug=True)
|
test_launcher.py | import pytest
import multiprocessing as mp
from pathlib import Path
import json
import copy
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'

# Arguments shared by all launcher tests: two background Blender instances
# running launcher.blend.py, each with its own '--x' remainder argument and
# seeds derived from 10 (checked as 10 and 11 by _validate_result).
LAUNCH_ARGS = dict(
    scene='',
    script=str(BLENDDIR/'launcher.blend.py'),
    num_instances=2,
    named_sockets=['DATA', 'GYM'],
    background=True,
    instance_args=[['--x', '3'],['--x', '4']],
    seed=10
)
def _validate_result(items):
assert len(items) == 2
first, second = 0, 1
if items[0]['btid']==1:
first, second = second, first
assert items[first]['btargs']['btid']==0
assert items[second]['btargs']['btid']==1
assert items[first]['btargs']['btseed']==10
assert items[second]['btargs']['btseed']==11
assert items[first]['btargs']['btsockets']['DATA'].startswith('tcp://')
assert items[first]['btargs']['btsockets']['GYM'].startswith('tcp://')
assert items[second]['btargs']['btsockets']['DATA'].startswith('tcp://')
assert items[second]['btargs']['btsockets']['GYM'].startswith('tcp://')
assert items[first]['remainder'] == ['--x', '3']
assert items[second]['remainder'] == ['--x', '4']
@pytest.mark.background
def test_launcher():
    # End-to-end: launch both instances and read one item from each
    # through the DATA socket, then validate the reported arguments.
    with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=2)
        items = [item for item in ds]
        _validate_result(items)
def _launch(q, tmp_path):
    # Child-process helper: start the instances, serialize the launch info
    # to disk, publish its path via the queue, then block until the
    # instances exit.
    with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
        path = Path(tmp_path / 'addresses.json')
        btt.LaunchInfo.save_json(path, bl.launch_info)
        q.put(path)
        bl.wait()
@pytest.mark.background
def test_launcher_connected_remote(tmp_path):
    # Simulates BlenderLauncher called from a separate process and
    # shows how one can connect to already launched instances through
    # serialization of addresses.
    q = mp.Queue()
    p = mp.Process(target=_launch, args=(q, tmp_path))
    p.start()
    # Block until the child has written and announced the launch info.
    path = q.get()
    launch_info = btt.LaunchInfo.load_json(path)
    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
def _launch_app(tmp_path, args):
    """Run the blendtorch launch app in-process.

    Launch arguments are serialized to ``launchargs.json``; the app writes
    the resulting connection info to ``launchinfo.json`` in the same dir.
    """
    from blendtorch.btt.apps import launch
    cfg_path = tmp_path / 'launchargs.json'
    with open(cfg_path, 'w') as fp:
        json.dump(args, fp, indent=4)
    launch.main(['--out-launch-info', str(tmp_path / 'launchinfo.json'), str(cfg_path)])
@pytest.mark.background
def test_launcher_app(tmp_path):
    """End-to-end test of the launch app spawned in a separate process."""
    import time
    p = mp.Process(target=_launch_app, args=(tmp_path, LAUNCH_ARGS))
    p.start()
    path = tmp_path / 'launchinfo.json'
    # Poll for the info file with a deadline: the original unbounded loop
    # would hang the whole test run forever if the child died early.
    deadline = time.monotonic() + 60
    while not path.exists():  # idiomatic instance call, not Path.exists(path)
        if time.monotonic() > deadline:
            p.terminate()
            raise TimeoutError('launch info file was never created')
        time.sleep(1)
    launch_info = btt.LaunchInfo.load_json(path)
    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
def test_launcher_app_primaryip(tmp_path):
    """Same as test_launcher_app, but resolving the bind address via 'primaryip'."""
    import time
    args = copy.deepcopy(LAUNCH_ARGS)
    args['bind_addr'] = 'primaryip'
    p = mp.Process(target=_launch_app, args=(tmp_path, args))
    p.start()
    path = tmp_path / 'launchinfo.json'
    # Poll with a deadline so a failed child cannot hang the test run forever.
    deadline = time.monotonic() + 60
    while not path.exists():  # idiomatic instance call, not Path.exists(path)
        if time.monotonic() > deadline:
            p.terminate()
            raise TimeoutError('launch info file was never created')
        time.sleep(1)
    launch_info = btt.LaunchInfo.load_json(path)
    print(launch_info.addresses)
    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
|
batcher.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
import queue as Queue
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
from tools import data
class Example(object):
    """Class representing a train/val/test example for text summarization."""

    def __init__(self, article, abstract_sentences, vocab, hps):
        """Initializes the Example, performing tokenization and truncation to
        produce the encoder, decoder and target sequences, which are stored in self.

        Args:
            article: source text; a string. each token is separated by a single space.
            abstract_sentences: list of strings, one per abstract sentence. In each
                sentence, each token is separated by a single space.
            vocab: Vocabulary object
            hps: hyperparameters
        """
        self.hps = hps

        # Get ids of special tokens
        start_decoding = vocab.word2id(data.START_DECODING)
        stop_decoding = vocab.word2id(data.STOP_DECODING)

        # Process the article: truncate to max_enc_steps, then map to vocab ids.
        article_words = article.split()
        if len(article_words) > hps.max_enc_steps:
            article_words = article_words[:hps.max_enc_steps]
        self.enc_len = len(article_words)  # length after truncation but before padding
        # list of word ids; OOVs are represented by the id for the UNK token
        self.enc_input = [vocab.word2id(w) for w in article_words]

        # Process the abstract
        abstract = ' '.join(abstract_sentences)  # string
        abstract_words = abstract.split()  # list of strings
        # list of word ids; OOVs are represented by the id for the UNK token
        abs_ids = [vocab.word2id(w) for w in abstract_words]

        # Get the decoder input sequence and target sequence
        self.dec_input, self.target = self.get_dec_inp_targ_seqs(
            abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
        self.dec_len = len(self.dec_input)

        # If using pointer-generator mode, we need to store some extra info
        if hps.pointer_gen:
            # Store a version of the enc_input where in-article OOVs are represented
            # by their temporary OOV id; also store the in-article OOV words themselves
            self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
            # Get a version of the reference summary where in-article OOVs are
            # represented by their temporary article OOV id
            abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
            # Overwrite decoder target sequence so it uses the temp article OOV ids
            _, self.target = self.get_dec_inp_targ_seqs(
                abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)

        # Store the original strings
        self.original_article = article
        self.original_abstract = abstract
        self.original_abstract_sents = abstract_sentences

    def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
        """Given the reference summary as a sequence of tokens, return the input
        sequence for the decoder, and the target sequence which we will use to
        calculate loss. The sequence will be truncated if it is longer than
        max_len. The input sequence must start with the start_id and the target
        sequence must end with the stop_id (but not if it's been truncated).

        Args:
            sequence: List of ids (integers)
            max_len: integer
            start_id: integer
            stop_id: integer

        Returns:
            inp: sequence length <=max_len starting with start_id
            target: sequence same length as input, ending with stop_id only if
                there was no truncation
        """
        inp = [start_id] + sequence[:]
        target = sequence[:]
        if len(inp) > max_len:  # truncate
            inp = inp[:max_len]
            target = target[:max_len]  # no end_token
        else:  # no truncation
            target.append(stop_id)  # end token
        assert len(inp) == len(target)
        return inp, target

    def pad_decoder_inp_targ(self, max_len, pad_id):
        """Pad decoder input and target sequences with pad_id up to max_len."""
        # list.extend with a computed pad run replaces the per-item while loop;
        # a negative run length yields an empty list, so over-length input is a no-op.
        self.dec_input.extend([pad_id] * (max_len - len(self.dec_input)))
        self.target.extend([pad_id] * (max_len - len(self.target)))

    def pad_encoder_input(self, max_len, pad_id):
        """Pad the encoder input sequence with pad_id up to max_len."""
        self.enc_input.extend([pad_id] * (max_len - len(self.enc_input)))
        if self.hps.pointer_gen:
            self.enc_input_extend_vocab.extend(
                [pad_id] * (max_len - len(self.enc_input_extend_vocab)))
class Batch(object):
    """Class representing a minibatch of train/val/test examples for text summarization."""

    def __init__(self, example_list, hps, vocab):
        """Turns the example_list into a Batch object.

        Args:
            example_list: List of Example objects
            hps: hyperparameters
            vocab: Vocabulary object
        """
        self.pad_id = vocab.word2id(data.PAD_TOKEN)  # id of the PAD token used to pad sequences
        self.init_encoder_seq(example_list, hps)  # initialize the input to the encoder
        self.init_decoder_seq(example_list, hps)  # initialize the input and targets for the decoder
        self.store_orig_strings(example_list)  # store the original strings

    def init_encoder_seq(self, example_list, hps):
        """Initializes the following:
        self.enc_batch:
          numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
        self.enc_lens:
          numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
        self.enc_padding_mask:
          numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
        If hps.pointer_gen, additionally initializes the following:
        self.max_art_oovs:
          maximum number of in-article OOVs in the batch
        self.art_oovs:
          list of list of in-article OOVs (strings), for each example in the batch
        self.enc_batch_extend_vocab:
          Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
        """
        # Determine the maximum length of the encoder input sequence in this batch
        max_enc_seq_len = max([ex.enc_len for ex in example_list])
        # Pad the encoder input sequences up to the length of the longest sequence
        # (mutates each Example in place so the row assignments below line up).
        for ex in example_list:
            ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
        # Initialize the numpy arrays
        # Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
        self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
        self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
        self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
        # Fill in the numpy arrays
        for i, ex in enumerate(example_list):
            self.enc_batch[i, :] = ex.enc_input[:]
            self.enc_lens[i] = ex.enc_len
            for j in range(ex.enc_len):
                self.enc_padding_mask[i][j] = 1
        # For pointer-generator mode, need to store some extra info
        if hps.pointer_gen:
            # Determine the max number of in-article OOVs in this batch
            self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
            # Store the in-article OOVs themselves
            self.art_oovs = [ex.article_oovs for ex in example_list]
            # Store the version of the enc_batch that uses the article OOV ids
            self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
            for i, ex in enumerate(example_list):
                self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]

    def init_decoder_seq(self, example_list, hps):
        """Initializes the following:
        self.dec_batch:
          numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
        self.target_batch:
          numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
        self.dec_padding_mask:
          numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
        """
        # Pad the inputs and targets (in place, unlike the encoder this is a fixed length)
        for ex in example_list:
            ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
        # Initialize the numpy arrays.
        # Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
        self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
        self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
        self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
        # Fill in the numpy arrays
        for i, ex in enumerate(example_list):
            self.dec_batch[i, :] = ex.dec_input[:]
            self.target_batch[i, :] = ex.target[:]
            for j in range(ex.dec_len):
                self.dec_padding_mask[i][j] = 1

    def store_orig_strings(self, example_list):
        """Store the original article and abstract strings in the Batch object"""
        self.original_articles = [ex.original_article for ex in example_list]  # list of lists
        self.original_abstracts = [ex.original_abstract for ex in example_list]  # list of lists
        self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list]  # list of list of lists
class Batcher(object):
    """A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""

    BATCH_QUEUE_MAX = 100  # max number of batches the batch_queue can hold

    def __init__(self, data_path, vocab, hps, single_pass):
        """Initialize the batcher. Start threads that process the data into batches.

        Args:
            data_path: tf.Example filepattern.
            vocab: Vocabulary object
            hps: hyperparameters
            single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
        """
        self._data_path = data_path
        self._vocab = vocab
        self._hps = hps
        self._single_pass = single_pass
        # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
        self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
        self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
        # Different settings depending on whether we're in single_pass mode or not
        if single_pass:
            self._num_example_q_threads = 1  # just one thread, so we read through the dataset just once
            self._num_batch_q_threads = 1  # just one thread to batch examples
            self._bucketing_cache_size = 1  # only load one batch's worth of examples before bucketing; this essentially means no bucketing
            self._finished_reading = False  # this will tell us when we're finished reading the dataset
        else:
            self._num_example_q_threads = 16  # num threads to fill example queue
            self._num_batch_q_threads = 4  # num threads to fill batch queue
            self._bucketing_cache_size = 100  # how many batches-worth of examples to load into cache before bucketing
        # Start the threads that load the queues. Daemon threads so they don't
        # block interpreter shutdown.
        self._example_q_threads = []
        for _ in range(self._num_example_q_threads):
            self._example_q_threads.append(Thread(target=self.fill_example_queue))
            self._example_q_threads[-1].daemon = True
            self._example_q_threads[-1].start()
        self._batch_q_threads = []
        for _ in range(self._num_batch_q_threads):
            self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
            self._batch_q_threads[-1].daemon = True
            self._batch_q_threads[-1].start()
        # Start a thread that watches the other threads and restarts them if they're dead
        if not single_pass:  # We don't want a watcher in single_pass mode because the threads shouldn't run forever
            self._watch_thread = Thread(target=self.watch_threads)
            self._watch_thread.daemon = True
            self._watch_thread.start()

    def next_batch(self):
        """Return a Batch from the batch queue.

        If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.

        Returns:
            batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
        """
        # If the batch queue is empty, print a warning
        if self._batch_queue.qsize() == 0:
            tf.logging.warning(
                'Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i',
                self._batch_queue.qsize(), self._example_queue.qsize())
            if self._single_pass and self._finished_reading:
                tf.logging.info("Finished reading dataset in single_pass mode.")
                return None
        batch = self._batch_queue.get()  # get the next Batch (blocks until one is available)
        return batch

    def fill_example_queue(self):
        """Reads data from file and processes into Examples which are then placed into the example queue."""
        input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
        while True:
            try:
                (article, abstract) = next(
                    input_gen)  # read the next example from file. article and abstract are both strings.
            except StopIteration:  # if there are no more examples:
                tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
                if self._single_pass:
                    tf.logging.info(
                        "single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
                    self._finished_reading = True
                    break
                else:
                    raise Exception("single_pass mode is off but the example generator is out of data; error.")
            abstract_sentences = [sent.strip() for sent in data.abstract2sents(
                abstract)]  # Use the <s> and </s> tags in abstract to get a list of sentences.
            example = Example(article, abstract_sentences, self._vocab, self._hps)  # Process into an Example.
            self._example_queue.put(example)  # place the Example in the example queue.

    def fill_batch_queue(self):
        """Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.

        In decode mode, makes batches that each contain a single example repeated.
        """
        while True:
            if self._hps.mode != 'decode':
                # Get bucketing_cache_size-many batches of Examples into a list, then sort
                inputs = []
                for _ in range(self._hps.batch_size * self._bucketing_cache_size):
                    inputs.append(self._example_queue.get())
                inputs = sorted(inputs, key=lambda inp: inp.enc_len)  # sort by length of encoder sequence
                # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
                batches = []
                for i in range(0, len(inputs), self._hps.batch_size):
                    batches.append(inputs[i:i + self._hps.batch_size])
                if not self._single_pass:
                    shuffle(batches)
                for b in batches:  # each b is a list of Example objects
                    self._batch_queue.put(Batch(b, self._hps, self._vocab))
            else:  # beam search decode mode
                ex = self._example_queue.get()
                b = [ex for _ in range(self._hps.batch_size)]
                self._batch_queue.put(Batch(b, self._hps, self._vocab))

    def watch_threads(self):
        """Watch example queue and batch queue threads and restart if dead."""
        while True:
            time.sleep(60)
            for idx, t in enumerate(self._example_q_threads):
                if not t.is_alive():  # if the thread is dead
                    tf.logging.error('Found example queue thread dead. Restarting.')
                    new_t = Thread(target=self.fill_example_queue)
                    self._example_q_threads[idx] = new_t
                    new_t.daemon = True
                    new_t.start()
            for idx, t in enumerate(self._batch_q_threads):
                if not t.is_alive():  # if the thread is dead
                    tf.logging.error('Found batch queue thread dead. Restarting.')
                    new_t = Thread(target=self.fill_batch_queue)
                    self._batch_q_threads[idx] = new_t
                    new_t.daemon = True
                    new_t.start()

    def text_generator(self, example_generator):
        """Generates article and abstract text from tf.Example.

        Args:
            example_generator: a generator of tf.Examples from file. See data.example_generator"""
        while True:
            e = next(example_generator)  # e is a tf.Example
            try:
                article_text = e.features.feature['article'].bytes_list.value[
                    0].decode()  # the article text was saved under the key 'article' in the data files
                abstract_text = e.features.feature['abstract'].bytes_list.value[
                    0].decode()  # the abstract text was saved under the key 'abstract' in the data files
            except ValueError:
                tf.logging.error('Failed to get article or abstract from example')
                continue
            if len(article_text) == 0:  # See https://github.com/abisee/pointer-generator/issues/1
                tf.logging.warning('Found an example with empty article text. Skipping it.')
            else:
                yield (article_text, abstract_text)
if __name__ == '__main__':
    # Manual smoke test: build a Batcher over local CNN/DailyMail data and
    # pull a single batch. NOTE(review): the absolute paths below are
    # machine-specific and must be adjusted before running.
    from tools.data import Vocab
    from collections import namedtuple
    vocab_path = "/mnt/home/jonathan/datasets/cnn-dailymail/cnn_dailymail_data/finished_files/vocab"
    data_path = "/mnt/home/jonathan/datasets/cnn-dailymail/cnn_dailymail_data/finished_files/chunked/train_*"
    vocab = Vocab(vocab_path, 50000)
    # Minimal hyperparameter set needed by Example/Batch/Batcher.
    hps_dict = {
        "batch_size": 1,
        "max_enc_steps": 400,
        "max_dec_steps": 100,
        "pointer_gen": True,
        "mode": "train",
    }
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
    batcher = Batcher(data_path, vocab, hps, single_pass=False)
    batch = batcher.next_batch()
    print(batch)
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
    import ctypes
except ImportError:
    # Fallback when ctypes is unavailable: an array is "bytes-like" but lacks
    # the string/sequence convenience methods the tests must not rely on.
    def byteslike(*pos, **kw):
        return array.array("b", bytes(*pos, **kw))
else:
    def byteslike(*pos, **kw):
        """Create a bytes-like object having no string or sequence methods"""
        data = bytes(*pos, **kw)
        obj = EmptyStruct()
        # Grow the zero-field struct to the payload size, then copy the bytes
        # into it through a writable memoryview.
        ctypes.resize(obj, len(data))
        memoryview(obj).cast("B")[:] = data
        return obj
    class EmptyStruct(ctypes.Structure):
        # Intentionally field-less; ctypes.resize gives it its storage.
        pass
# Detect a MemorySanitizer build from the interpreter's own build flags;
# some tests are too slow or noisy under MSan and are skipped.
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
    '-fsanitize=memory' in _cflags or
    '--with-memory-sanitizer' in _config_args
)

# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
# (gettotalrefcount only exists on debug builds; dev mode also enables it.)
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""

    def __init__(self, read_stack=()):
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        # Wrong, but the tests only need *some* integer back.
        return 0

    def tell(self):
        # Same caveat as seek().
        return 0

    def readinto(self, buf):
        """Serve the next scripted chunk into buf, splitting oversized chunks."""
        self._reads += 1
        if not self._read_stack:
            # Read past the scripted data: count it and report EOF.
            self._extraneous_reads += 1
            return 0
        data = self._read_stack[0]
        if data is None:
            # A scripted "would block" marker.
            del self._read_stack[0]
            return None
        room = len(buf)
        size = len(data)
        if size <= room:
            del self._read_stack[0]
            buf[:size] = data
            return size
        # Chunk larger than the buffer: deliver a prefix, keep the rest queued.
        buf[:] = data[:room]
        self._read_stack[0] = data[room:]
        return room

    def truncate(self, pos=None):
        return pos
# Bind the mixin to both the C (io) and pure-Python (_pyio) raw-IO bases so
# each test class exercises both implementations.
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    pass

class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Scripted raw IO that also supplies read() directly."""

    def read(self, n=None):
        """Pop and return the next scripted chunk; b"" once exhausted.

        Reads past the scripted data bump ``_extraneous_reads`` so tests can
        detect unnecessary low-level reads.
        """
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except IndexError:
            # Only an exhausted stack is expected here. The original bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit and could
            # mask genuine bugs in pop().
            self._extraneous_reads += 1
            return b""
# Concrete MockRawIO over the C and pure-Python raw-IO bases.
class CMockRawIO(MockRawIO, io.RawIOBase):
    pass

class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw IO whose methods deliberately return wrong values, so the buffered
    layers' sanity checks can be exercised."""

    def write(self, b):
        # Claim twice as many bytes written as were accepted.
        return 2 * super().write(b)

    def read(self, n=None):
        # Return the scripted chunk duplicated.
        return 2 * super().read(n)

    def seek(self, pos, whence):
        return -123

    def tell(self):
        return -456

    def readinto(self, buf):
        super().readinto(buf)
        # Claim five times the buffer size was filled.
        return 5 * len(buf)
# Concrete misbehaving raw IO over the C and pure-Python raw-IO bases.
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    pass

class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    pass
class SlowFlushRawIO(MockRawIO):
    # Signals via ``in_flush`` that flush() has started, then stalls briefly
    # so tests can observe a flush in progress from another thread.
    def __init__(self):
        super().__init__()
        self.in_flush = threading.Event()

    def flush(self):
        self.in_flush.set()
        time.sleep(0.25)
# Concrete slow-flush raw IO over the C and pure-Python raw-IO bases.
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
    pass

class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
    pass
class CloseFailureIO(MockRawIO):
    """Raw IO whose first close() raises OSError; later calls are no-ops."""
    closed = 0

    def close(self):
        if self.closed:
            return
        self.closed = 1
        raise OSError
# Concrete close-failing raw IO over the C and pure-Python raw-IO bases.
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    pass

class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    pass
class MockFileIO:
    """Mixin over a BytesIO-like base that records the size of every read.

    ``read_history`` collects len(result) per call (None for a None result),
    letting tests verify how the buffered layers drive the raw object.
    """

    def __init__(self, data):
        self.read_history = []
        super().__init__(data)

    def read(self, n=None):
        res = super().read(n)
        self.read_history.append(len(res) if res is not None else None)
        return res

    def readinto(self, b):
        res = super().readinto(b)
        self.read_history.append(res)
        return res
# Read-recording BytesIO over the C and pure-Python implementations.
class CMockFileIO(MockFileIO, io.BytesIO):
    pass

class PyMockFileIO(MockFileIO, pyio.BytesIO):
    pass
class MockUnseekableIO:
    """Mixin that reports and enforces non-seekability.

    ``UnsupportedOperation`` is supplied by the concrete subclass, pointing
    at the matching implementation's exception type.
    """

    def seekable(self):
        return False

    def _unsupported(self, *args):
        raise self.UnsupportedOperation("not seekable")

    # seek/tell/truncate all fail identically; alias one implementation.
    seek = _unsupported
    tell = _unsupported
    truncate = _unsupported
# Concrete unseekable streams; each binds UnsupportedOperation to its
# implementation's exception type.
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
    UnsupportedOperation = io.UnsupportedOperation

class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
    UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
    """Writer simulating non-blocking behavior around a trigger character.

    After block_on(c), a write whose payload contains c either performs a
    partial write (data before the first c) or, when c is at the front,
    disarms the blocker and returns None to signal "would block".
    """

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        """Return and clear everything accepted so far."""
        written = b"".join(self._write_stack)
        del self._write_stack[:]
        return written

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence=0):
        # Naive, but sufficient for the tests.
        return 0

    def writable(self):
        return True

    def write(self, b):
        b = bytes(b)
        if self._blocker_char:
            try:
                idx = b.index(self._blocker_char)
            except ValueError:
                idx = -1  # blocker not present: fall through to a full write
            if idx > 0:
                # Accept everything before the blocker; report a partial write.
                self._write_stack.append(b[:idx])
                return idx
            if idx == 0:
                # Blocker at the front: disarm it and signal "would block".
                self._blocker_char = None
                return None
        self._write_stack.append(b)
        return len(b)
# Concrete non-blocking writers; each binds BlockingIOError to its
# implementation's exception type.
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    BlockingIOError = io.BlockingIOError

class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
    """Raw (unbuffered) files report mode-appropriate abilities."""
    cases = (
        # mode, readable, writable, shared operation battery
        ("wb", False, True, self.write_ops),
        ("rb", True, False, self.read_ops),
    )
    for mode, readable, writable, ops in cases:
        with self.open(support.TESTFN, mode, buffering=0) as f:
            self.assertEqual(f.readable(), readable)
            self.assertEqual(f.writable(), writable)
            self.assertEqual(f.seekable(), True)
            ops(f)
def test_buffered_file_io(self):
    """Buffered files report mode-appropriate abilities."""
    with self.open(support.TESTFN, "wb") as f:
        # Write-only: not readable, but writable and seekable.
        for ability, expected in (("readable", False),
                                  ("writable", True),
                                  ("seekable", True)):
            self.assertEqual(getattr(f, ability)(), expected)
        self.write_ops(f)
    with self.open(support.TESTFN, "rb") as f:
        # Read-only: readable and seekable, not writable.
        for ability, expected in (("readable", True),
                                  ("writable", False),
                                  ("seekable", True)):
            self.assertEqual(getattr(f, ability)(), expected)
        self.read_ops(f, True)
def test_readline(self):
    """readline() honours byte-size limits and embedded NUL bytes."""
    with self.open(support.TESTFN, "wb") as f:
        f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
    with self.open(support.TESTFN, "rb") as f:
        # (args to readline, expected result) pairs, consumed in order.
        expectations = (
            ((), b"abc\n"),
            ((10,), b"def\n"),          # limit larger than the line
            ((2,), b"xy"),              # limit splits a line
            ((4,), b"zzy\n"),
            ((), b"foo\x00bar\n"),      # NUL does not terminate a line
            ((None,), b"another line"),  # None means unlimited
        )
        for args, expected in expectations:
            self.assertEqual(f.readline(*args), expected)
        # A float size limit is rejected in binary mode...
        self.assertRaises(TypeError, f.readline, 5.3)
    # ...and in text mode alike.
    with self.open(support.TESTFN, "r") as f:
        self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
    # Issue #30061
    # Iterating an IOBase whose readline() returns an object without
    # __len__ (here None) must raise instead of crashing.
    class BrokenReadline(self.IOBase):
        def readline(self):
            return None
    self.assertRaises((TypeError, StopIteration), next, BrokenReadline())
def test_next_nonsizeable(self):
    # Issue #30061
    # readlines() with a hint must not crash when __next__() returns an
    # object without __len__ (here None).
    class BrokenNext(self.IOBase):
        def __next__(self):
            return None
    self.assertRaises(TypeError, BrokenNext().readlines, 1)
def test_raw_bytes_io(self):
    """BytesIO supports the shared write/read operation batteries."""
    out = self.BytesIO()
    self.write_ops(out)
    payload = out.getvalue()
    self.assertEqual(payload, b"hello world\n")
    # Feed the produced bytes back through the read battery.
    self.read_ops(self.BytesIO(payload), True)
def test_large_file_ops(self):
    """Exercise >2 GiB file operations (resource-gated on some systems)."""
    # On Windows and Mac OSX this test consumes large resources; It takes
    # a long time to build the >2 GiB file and takes >2 GiB of disk space
    # therefore the resource must be enabled to run this test.
    if sys.platform.startswith('win') or sys.platform == 'darwin':
        support.requires(
            'largefile',
            'test requires %s bytes and a long time to run' % self.LARGE)
    # Run once unbuffered (buffering=0) and once with default buffering.
    for extra_args in ((0,), ()):
        with self.open(support.TESTFN, "w+b", *extra_args) as f:
            self.large_file_ops(f)
def test_with_open(self):
    """The with-statement closes the file on both normal and error exit."""
    for bufsize in (0, 100):
        f = None
        with self.open(support.TESTFN, "wb", bufsize) as f:
            f.write(b"xxx")
        self.assertEqual(f.closed, True)
        f = None
        # Leaving the block via an exception must still close the file.
        with self.assertRaises(ZeroDivisionError):
            with self.open(support.TESTFN, "wb", bufsize) as f:
                1/0
        self.assertEqual(f.closed, True)
# issue 5008
def test_append_mode_tell(self):
    """Files opened for appending start with tell() at end of file."""
    with self.open(support.TESTFN, "wb") as f:
        f.write(b"xxx")
    # Binary append, unbuffered and buffered: position is exactly 3.
    for kwargs in ({"buffering": 0}, {}):
        with self.open(support.TESTFN, "ab", **kwargs) as f:
            self.assertEqual(f.tell(), 3)
    # Text append: the exact cookie is opaque, but it must be past 0.
    with self.open(support.TESTFN, "a") as f:
        self.assertGreater(f.tell(), 0)
def test_destructor(self):
    """Destroying an unclosed FileIO subclass runs __del__, close() and
    flush() in that order, and still flushes pending data to disk."""
    record = []
    class MyFileIO(self.FileIO):
        def __del__(self):
            record.append(1)
            # Call the base __del__ only if one exists (it may not).
            try:
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    # Destroying an unclosed file emits a ResourceWarning.
    with support.check_warnings(('', ResourceWarning)):
        f = MyFileIO(support.TESTFN, "wb")
        f.write(b"xxx")
        del f
        support.gc_collect()
    # Expected order: __del__ (1), then close (2), then flush (3).
    self.assertEqual(record, [1, 2, 3])
    # Destruction must have flushed the pending bytes.
    with self.open(support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
    """Shared driver: subclass *base*, destroy an instance, and verify
    the destructor ordering __del__ -> close() -> flush()."""
    record = []
    class MyIO(base):
        def __init__(self):
            # This exercises the availability of attributes on object
            # destruction.
            # (in the C version, close() is called by the tp_dealloc
            # function, not by __del__)
            self.on_del = 1
            self.on_close = 2
            self.on_flush = 3
        def __del__(self):
            record.append(self.on_del)
            # Chain to the base __del__ only if one exists.
            try:
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(self.on_close)
            super().close()
        def flush(self):
            record.append(self.on_flush)
            super().flush()
    f = MyIO()
    del f
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
# Each abstract I/O base class must honour the same destructor ordering
# (__del__ -> close -> flush); see _check_base_destructor above.
def test_IOBase_destructor(self):
    self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
    self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
    self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
    self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
    """Closing a buffered file flushes its pending data to disk."""
    payload = b"xxx"
    with self.open(support.TESTFN, "wb") as f:
        f.write(payload)
    # The with-block closed (and therefore flushed) the file.
    with self.open(support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), payload)
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
    """closefd=False is rejected when opening by file name."""
    with self.assertRaises(ValueError):
        self.open(support.TESTFN, 'w', closefd=False)
def test_read_closed(self):
    """Operations on a closed file object raise ValueError, even when it
    was built on a borrowed (closefd=False) descriptor."""
    with self.open(support.TESTFN, "w") as f:
        f.write("egg\n")
    with self.open(support.TESTFN, "r") as f:
        # Second file object sharing f's descriptor; closefd=False means
        # closing it must not invalidate the outer file.
        file = self.open(f.fileno(), "r", closefd=False)
        self.assertEqual(file.read(), "egg\n")
        file.seek(0)
        file.close()
        self.assertRaises(ValueError, file.read)
    # Same check in binary mode, with readinto() after close.
    with self.open(support.TESTFN, "rb") as f:
        file = self.open(f.fileno(), "rb", closefd=False)
        self.assertEqual(file.read()[:3], b"egg")
        file.close()
        self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
    # can't use closefd in combination with a file name
    with self.assertRaises(ValueError):
        self.open(support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
    """The closefd flag is exposed on the underlying raw FileIO object."""
    with self.open(support.TESTFN, "wb") as f:
        f.write(b"egg\n")
    with self.open(support.TESTFN, "r") as f:
        # Opened by name: the descriptor is owned, so closefd is True.
        self.assertEqual(f.buffer.raw.closefd, True)
        # Opened from an existing fd with closefd=False: not owned.
        file = self.open(f.fileno(), "r", closefd=False)
        self.assertEqual(file.buffer.raw.closefd, False)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
    # FileIO objects are collected, and collecting them flushes
    # all data to disk.
    with support.check_warnings(('', ResourceWarning)):
        f = self.FileIO(support.TESTFN, "wb")
        f.write(b"abcxxx")
        # Self-reference: only cyclic GC (not refcounting) can reclaim it.
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
    # The weakref must be dead, proving the cycle was collected.
    self.assertIsNone(wr(), wr)
    # Collection must have flushed the buffered bytes to disk.
    with self.open(support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
    # Issue #1174606: reading from an unbounded stream such as /dev/zero.
    zero = "/dev/zero"
    if not os.path.exists(zero):
        self.skipTest("{0} does not exist".format(zero))
    if sys.maxsize > 0x7FFFFFFF:
        self.skipTest("test can only run in a 32-bit address space")
    if support.real_max_memuse < support._2G:
        self.skipTest("test requires at least 2 GiB of memory")
    # The raw, buffered and text layers must all refuse an unbounded read.
    for mode, kwargs in (("rb", {"buffering": 0}), ("rb", {}), ("r", {})):
        with self.open(zero, mode, **kwargs) as f:
            self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
    # Test that the file is closed despite failed flush
    # and that flush() is called before file closed.
    # *args/**kwargs are forwarded to self.open (path-or-fd, mode, ...).
    f = self.open(*args, **kwargs)
    closed = []
    def bad_flush():
        # Record whether the file was already closed when flush ran.
        closed[:] = [f.closed]
        raise OSError()
    f.flush = bad_flush
    self.assertRaises(OSError, f.close) # exception not swallowed
    self.assertTrue(f.closed)
    self.assertTrue(closed) # flush() called
    self.assertFalse(closed[0]) # flush() called before file closed
    f.flush = lambda: None # break reference loop
@unittest.skip("TODO: RUSTPYTHON, specifics of operation order in close()")
def test_flush_error_on_close(self):
    # Issue #5700: io.FileIO calls flush() after file closed
    # Exercise the raw, buffered and text layers; each layer is checked
    # when opened by name, by fd, and by fd with closefd=False.
    layers = (
        ('wb', {'buffering': 0}),  # raw
        ('wb', {}),                # buffered
        ('w', {}),                 # text
    )
    for mode, kwargs in layers:
        self.check_flush_error_on_close(support.TESTFN, mode, **kwargs)
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        self.check_flush_error_on_close(fd, mode, **kwargs)
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        self.check_flush_error_on_close(fd, mode, closefd=False, **kwargs)
        os.close(fd)
def test_multi_close(self):
    """Repeated close() calls are harmless; flush() after close raises."""
    f = self.open(support.TESTFN, "wb", buffering=0)
    for _ in range(3):
        f.close()
    self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
    # Exercise the default limited RawIOBase.read(n) implementation (which
    # calls readinto() internally).  None entries in the mock simulate
    # EWOULDBLOCK, which read() must pass through as None.
    rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
    for expected in (b"ab", b"c", b"d", None, b"ef", b"g", None, b""):
        self.assertEqual(rawio.read(2), expected)
def test_types_have_dict(self):
    """Instances of the I/O classes must carry a __dict__."""
    instances = (
        self.IOBase(),
        self.RawIOBase(),
        self.TextIOBase(),
        self.StringIO(),
        self.BytesIO(),
    )
    for instance in instances:
        self.assertTrue(hasattr(instance, "__dict__"))
def test_opener(self):
    """open() uses the descriptor returned by a custom opener callback."""
    with self.open(support.TESTFN, "w") as f:
        f.write("egg\n")
    fd = os.open(support.TESTFN, os.O_RDONLY)
    # The opener ignores the requested path and hands back our fd, so the
    # "non-existent" name is never actually opened.
    with self.open("non-existent", "r", opener=lambda path, flags: fd) as f:
        self.assertEqual(f.read(), "egg\n")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_opener_negative_1(self):
    # Issue #27066.
    # An opener returning -1 must be rejected with a clear ValueError.
    with self.assertRaises(ValueError) as cm:
        open('non-existent', 'r', opener=lambda fname, flags: -1)
    self.assertEqual(str(cm.exception), 'opener returned -1')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_opener_other_negative(self):
    # Issue #27066.
    # Any negative descriptor from the opener is invalid, not just -1.
    with self.assertRaises(ValueError) as cm:
        open('non-existent', 'r', opener=lambda fname, flags: -2)
    self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
    # Issue #4841
    # With closefd=False, neither re-initialization nor close() of a
    # FileIO may close the borrowed descriptor.
    with self.open(__file__, 'rb') as f1, \
         self.open(__file__, 'rb') as f2:
        fileio = self.FileIO(f1.fileno(), closefd=False)
        # .__init__() must not close f1
        fileio.__init__(f2.fileno(), closefd=False)
        f1.readline()
        # .close() must not close f2
        fileio.close()
        f2.readline()
def test_nonbuffered_textio(self):
    """Text mode with buffering=0 is invalid and must not leak a file."""
    with support.check_no_resource_warning(self), \
         self.assertRaises(ValueError):
        self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
    """An unrecognized newline argument is rejected without leaking."""
    with support.check_no_resource_warning(self), \
         self.assertRaises(ValueError):
        self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
    # Test the readinto()/readinto1() implementations inherited from
    # BufferedIOBase (they are built on top of read()/read1()).
    class FixedStream(self.BufferedIOBase):
        def read(self, size):
            return b"12345"
        read1 = read
    stream = FixedStream()
    for method_name in ("readinto", "readinto1"):
        with self.subTest(method_name):
            target = byteslike(5)
            self.assertEqual(getattr(stream, method_name)(target), 5)
            self.assertEqual(bytes(target), b"12345")
def test_fspath_support(self):
    """open() accepts os.PathLike objects and propagates errors from bad
    path types or a raising __fspath__."""
    def check_path_succeeds(path):
        with self.open(path, "w") as f:
            f.write("egg\n")
        with self.open(path, "r") as f:
            self.assertEqual(f.read(), "egg\n")
    # FakePath (defined elsewhere in this file) wraps a value as PathLike.
    check_path_succeeds(FakePath(support.TESTFN))
    check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
    with self.open(support.TESTFN, "w") as f:
        # __fspath__ yielding a non-str/bytes value must raise TypeError.
        bad_path = FakePath(f.fileno())
        with self.assertRaises(TypeError):
            self.open(bad_path, 'w')
        bad_path = FakePath(None)
        with self.assertRaises(TypeError):
            self.open(bad_path, 'w')
        # NOTE(review): FakePath presumably raises the wrapped value when
        # it is an exception class — confirm against its definition.
        bad_path = FakePath(FloatingPointError)
        with self.assertRaises(FloatingPointError):
            self.open(bad_path, 'w')
    # ensure that refcounting is correct with some error conditions
    with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
        self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
    # Exercise the default unlimited RawIOBase.read() and readall()
    # implementations.
    chunks = (b"abc", b"d", b"efg")
    self.assertEqual(self.MockRawIOWithoutRead(chunks).read(), b"abcdefg")
    self.assertEqual(self.MockRawIOWithoutRead(chunks).readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
    # Exercise the default BufferedIOBase.readinto() and readinto1()
    # implementations (which call read() or read1() internally).
    class Reader(self.BufferedIOBase):
        def __init__(self, avail):
            # avail holds the bytes not yet consumed.
            self.avail = avail
        def read(self, size):
            result = self.avail[:size]
            self.avail = self.avail[size:]
            return result
        def read1(self, size):
            """Returns no more than 5 bytes at once"""
            return self.read(min(size, 5))
    tests = (
        # (test method, total data available, read buffer size, expected
        # read size)
        ("readinto", 10, 5, 5),
        ("readinto", 10, 6, 6), # More than read1() can return
        ("readinto", 5, 6, 5), # Buffer larger than total available
        ("readinto", 6, 7, 6),
        ("readinto", 10, 0, 0), # Empty buffer
        ("readinto1", 10, 5, 5), # Result limited to single read1() call
        ("readinto1", 10, 6, 5), # Buffer larger than read1() can return
        ("readinto1", 5, 6, 5), # Buffer larger than total available
        ("readinto1", 6, 7, 5),
        ("readinto1", 10, 0, 0), # Empty buffer
    )
    # Sentinel byte used to detect writes past the filled prefix.
    UNUSED_BYTE = 0x81
    for test in tests:
        with self.subTest(test):
            method, avail, request, result = test
            reader = Reader(bytes(range(avail)))
            buffer = bytearray((UNUSED_BYTE,) * request)
            method = getattr(reader, method)
            self.assertEqual(method(buffer), result)
            self.assertEqual(len(buffer), request)
            # Filled prefix holds the data in order...
            self.assertSequenceEqual(buffer[:result], range(result))
            # ...and the remainder of the buffer is untouched.
            unused = (UNUSED_BYTE,) * (request - result)
            self.assertSequenceEqual(buffer[result:], unused)
            self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
    """close() must not trip an internal assertion when flush() raises
    and __setattr__ silently refuses to record any state."""
    class R(self.IOBase):
        def __setattr__(self, name, value):
            # Swallow all attribute writes, so 'closed' can never be set.
            pass
        def flush(self):
            raise OSError()
    f = R()
    # This would cause an assertion failure.
    self.assertRaises(OSError, f.close)
    # Silence destructor error
    R.flush = lambda self: None
class CIOTest(IOTest):
    """Run the IOTest battery against the C implementation (the `io`
    module), plus C-specific finalization checks."""

    # TODO: RUSTPYTHON, cyclic gc
    @unittest.expectedFailure
    def test_IOBase_finalize(self):
        # Issue #12149: segmentation fault on _PyIOBase_finalize when both a
        # class which inherits IOBase and an object of this class are caught
        # in a reference cycle and close() is already in the method cache.
        class MyIO(self.IOBase):
            def close(self):
                pass

        # create an instance to populate the method cache
        MyIO()
        obj = MyIO()
        obj.obj = obj
        wr = weakref.ref(obj)
        del MyIO
        del obj
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    # TODO: RUSTPYTHON, AssertionError: filter ('', ResourceWarning) did not catch any warning
    @unittest.expectedFailure
    def test_destructor(self):
        # Bug fix: the zero-argument super() form already binds self, so the
        # previous `super().test_destructor(self)` passed self twice and
        # raised TypeError, masking the ResourceWarning failure that the
        # expectedFailure decorator documents.
        super().test_destructor()
class PyIOTest(IOTest):
    """Run the IOTest battery against the pure-Python implementation."""

    def test_optional_abilities(self):
        # NOTE(review): this override only delegates to the base class; it
        # looks redundant, but it may exist to shadow a skip/expectedFailure
        # decorator on IOTest.test_optional_abilities (not visible here) —
        # confirm before removing.
        super().test_optional_abilities()
        pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
    """Verify the C (`io`) and pure-Python (`pyio`) RawIOBase classes
    expose identical method sets, in both directions."""

    def test_RawIOBase_io_in_pyio_match(self):
        """Test that pyio RawIOBase class has all c RawIOBase methods"""
        missing = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
                                              ignore=('__weakref__',))
        self.assertEqual(missing, set(), msg='Python RawIOBase does not have all C RawIOBase methods')

    def test_RawIOBase_pyio_in_io_match(self):
        """Test that c RawIOBase class has all pyio RawIOBase methods"""
        missing = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
        self.assertEqual(missing, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom.
    # Mixin: the concrete TestCase supplies self.tp (the buffered class
    # under test) and the Mock*IO helpers.

    def test_detach(self):
        """detach() returns the raw stream once; a second call raises."""
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        self.assertRaises(ValueError, buf.detach)

        repr(buf)  # Should still work

    def test_fileno(self):
        """The buffered layer forwards fileno() from the raw object
        (the mock reports 42)."""
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())

    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 9)

    def test_override_destructor(self):
        """Overridden __del__/close/flush run in that order on destruction."""
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                # Chain to the base __del__ only if one exists.
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        del bufio
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])

    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)

    # TODO: RUSTPYTHON, sys.unraisablehook
    @unittest.expectedFailure
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        with support.catch_unraisable_exception() as cm:
            with self.assertRaises(AttributeError):
                self.tp(rawio).xyzzy

            # Depending on the implementation, the failing close() either
            # surfaces through the unraisable hook or is suppressed.
            if not IOBASE_EMITS_UNRAISABLE:
                self.assertIsNone(cm.unraisable)
            elif cm.unraisable is not None:
                self.assertEqual(cm.unraisable.exc_type, OSError)

    def test_repr(self):
        """repr() includes the raw stream's name when it has one."""
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
        self.assertRegex(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)

    def test_recursive_repr(self):
        # Issue #25455
        # A raw stream whose name is the buffered object itself must not
        # send repr() into infinite recursion.
        raw = self.MockRawIO()
        b = self.tp(raw)
        with support.swap_attr(raw, 'name', b):
            try:
                repr(b)  # Should not crash
            except RuntimeError:
                pass

    @unittest.skip("TODO: RUSTPYTHON, specifics of operation order in close()")
    def test_flush_error_on_close(self):
        # Test that buffered file is closed despite failed flush
        # and that flush() is called before file closed.
        raw = self.MockRawIO()
        closed = []
        def bad_flush():
            # Snapshot both closed states at the moment flush runs.
            closed[:] = [b.closed, raw.closed]
            raise OSError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
        self.assertTrue(raw.closed)
        self.assertTrue(closed) # flush() called
        self.assertFalse(closed[0]) # flush() called before file closed
        self.assertFalse(closed[1])
        raw.flush = lambda: None # break reference loop

    def test_close_error_on_close(self):
        """When both flush() and close() fail, close()'s error wins and
        chains the flush error as __context__."""
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(b.closed)
        # Silence destructor error
        raw.close = lambda: None
        b.flush = lambda: None

    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        # Same chaining check with not-yet-normalized exceptions (the
        # NameErrors are raised while looking up undefined names).
        raw = self.MockRawIO()
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            b.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(b.closed)
        # Silence destructor error
        b.flush = lambda: None
        raw.close = lambda: None

    def test_multi_close(self):
        """Repeated close() is harmless; flush() after close raises."""
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)

    def test_unseekable(self):
        """tell()/seek() on an unseekable raw stream raise."""
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)

    def test_readonly_attributes(self):
        """The .raw attribute is read-only."""
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises(AttributeError):
            buf.raw = x
class SizeofTest:
    """Mixin checking that a buffered object's reported size tracks its
    internal buffer allocation (CPython-only, relies on sys.getsizeof)."""

    @support.cpython_only
    def test_sizeof(self):
        # The size minus the buffer must be constant across buffer sizes.
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)

    @support.cpython_only
    def test_buffer_freeing(self):  # fixed: stray space before ':' (PEP 8)
        # Closing the buffered object must release its internal buffer.
        bufsize = 4096
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize)
        size = sys.getsizeof(bufio) - bufsize
        bufio.close()
        self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    """Read-side tests shared by the C and Python BufferedReader
    implementations; subclasses set self.tp to the class under test."""
    read_mode = "rb"

    def test_constructor(self):
        """__init__ may be re-invoked; invalid buffer sizes raise."""
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        # The object remains usable after the failed re-initializations.
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())

    def test_uninitialized(self):
        """Using an instance created via __new__ fails cleanly until
        __init__ runs."""
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')

    def test_read(self):
        # Both read(None) and a large read(n) drain all chunks.
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)

    def test_read1(self):
        """read1() performs at most one raw read; the counts assert how
        many raw reads have been issued so far."""
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"", bufio.read1(0))
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)

    def test_read1_arbitrary(self):
        """read1() with no argument or -1 returns whatever is buffered or
        one raw chunk."""
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"bc", bufio.read1())
        self.assertEqual(b"d", bufio.read1())
        self.assertEqual(b"efg", bufio.read1(-1))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1())
        self.assertEqual(rawio._reads, 4)

    def test_readinto(self):
        """readinto() fills the target, returning short counts at EOF and
        on simulated EWOULDBLOCK (None chunks)."""
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        # Only one byte left: short count, second byte untouched.
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")

    def test_readinto1(self):
        """readinto1() issues at most one extra raw read per call."""
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        # A target larger than the internal buffer bypasses it.
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
        self.assertEqual(b[:6], b"fghjkl")
        self.assertEqual(rawio._reads, 4)

    def test_readinto_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readinto1_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto1(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readlines(self):
        """readlines() honours the optional byte hint."""
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])

    def test_buffering(self):
        """Check the sizes of the raw reads triggered by buffered reads."""
        data = b"abcdefghi"
        dlen = len(data)
        tests = [
            # [buffer size, buffered read sizes, expected raw read sizes]
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3],     [ dlen ]    ],
            [ 4,   [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # After all data is consumed, a would-block turn returns None...
        self.assertIsNone(bufio.read())
        # ...and a real EOF returns b"".
        self.assertEqual(b"", bufio.read())

        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # Asking for more than is available just returns everything.
        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())

    @support.requires_resource('cpu')
    def test_threads(self):
        """Concurrent reads through one BufferedReader must neither drop
        nor duplicate bytes."""
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                # Every byte value must appear exactly N times in total.
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)

    def test_unseekable(self):
        # Extends the common check: reading must not make the stream
        # spuriously seekable.
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        bufio.read(1)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        self.assertRaises(self.UnsupportedOperation, bufio.tell)

    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
        # Silence destructor error
        bufio.close = lambda: None

    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                "failed for {}: {} != 0".format(n, rawio._extraneous_reads))

    def test_read_on_closed(self):
        # Issue #23796
        # peek()/read1() on a closed reader must raise ValueError.
        b = io.BufferedReader(io.BytesIO(b"12"))
        b.read(1)
        b.close()
        self.assertRaises(ValueError, b.peek)
        self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    """Run the BufferedReaderTest battery against the C implementation,
    plus C-specific allocation and GC checks."""
    tp = io.BufferedReader

    @unittest.skip("TODO: RUSTPYTHON, fallible allocation")
    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # After a failed re-initialization the object must stay unusable.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)

    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            # Self-reference: only cyclic GC can reclaim it.
            f.f = f
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_args_error(self):
        # Issue #17275
        # Too many constructor arguments must name the class in the error.
        with self.assertRaisesRegex(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
    # Run the BufferedReaderTest battery against the pure-Python
    # implementation from _pyio.
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
    """__init__ may be re-invoked; invalid buffer sizes raise ValueError
    without breaking the writer."""
    rawio = self.MockRawIO()
    writer = self.tp(rawio)
    writer.__init__(rawio)
    writer.__init__(rawio, buffer_size=1024)
    writer.__init__(rawio, buffer_size=16)
    self.assertEqual(3, writer.write(b"abc"))
    writer.flush()
    for bad_size in (0, -16, -1):
        self.assertRaises(ValueError, writer.__init__, rawio,
                          buffer_size=bad_size)
    # Still usable after the rejected re-initializations.
    writer.__init__(rawio)
    self.assertEqual(3, writer.write(b"ghi"))
    writer.flush()
    self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After write write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    # Run the shared writer tests against the C implementation, plus
    # C-specific checks (allocation limits, GC, error messages).
    tp = io.BufferedWriter

    @unittest.skip("TODO: RUSTPYTHON, fallible allocation")
    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes must raise and leave the writer unusable.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # self-referencing cycle, collectable only via GC
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    # Re-run the shared BufferedWriter tests against the pure-Python
    # implementation (_pyio.BufferedWriter).
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which combines an independent reader and
    writer into one read/write object.  Subclasses bind ``tp`` to the C
    or pure-Python implementation."""

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # Objects created via __new__ without __init__ must fail cleanly.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # A pair has two underlying streams, so detach() is unsupported.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        # read(None) means "read everything", like read() with no args
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        # hint=5 stops after the line that pushes the byte total past 5
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        # peek() must not advance the read position
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.

    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_reader_close_error_on_close(self):
        # If only the reader's close() raises, the writer must still be
        # closed and the pair marked closed.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)
        # Silence destructor error
        reader.close = lambda: None

    # TODO: RUSTPYTHON, sys.unraisablehook
    @unittest.expectedFailure
    def test_writer_close_error_on_close(self):
        # If the writer's close() raises, the reader is still closed but
        # the pair stays "open" (writer close failed).
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)
        # Silence destructor error
        writer.close = lambda: None
        writer = None
        # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
        with support.catch_unraisable_exception():
            # Ignore BufferedRWPair unraisable exception
            with support.catch_unraisable_exception():
                pair = None
                support.gc_collect()
            support.gc_collect()

    def test_reader_writer_close_error_on_close(self):
        # Both close() calls raise: the reader's error must be reported
        # with the writer's error chained as __context__.
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)
        # Silence destructor error
        reader.close = lambda: None
        writer.close = lambda: None

    def test_isatty(self):
        # The pair is a tty if either side is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None  # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    # Shared pair tests against the C implementation.
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    # Shared pair tests against the pure-Python implementation.
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for BufferedRandom: inherits the full reader and writer
    suites and adds checks for interleaved read/write/seek behavior on
    a single seekable stream."""
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack)  # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)  # whence=2: relative to end
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)   # whence=1: relative to current position
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # float offsets are rejected
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        # Interleave writes with reads performed via read_func and make
        # sure flush() keeps the logical position consistent.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                             b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA")  # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2)  # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')
        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    # Shared random-access tests against the C implementation, plus
    # C-specific allocation/GC/error-message checks.
    tp = io.BufferedRandom

    @unittest.skip("TODO: RUSTPYTHON, fallible allocation")
    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
# Whole class skipped under RustPython (resizing an exported buffer).
@unittest.skip("TODO: RUSTPYTHON, BufferError: Existing exports of data: object cannot be re-sized")
class PyBufferedRandomTest(BufferedRandomTest):
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    A stateful, buffering decoder used to exercise TextIOWrapper seek/tell.

    Input is a sequence of words, either fixed-length (the current input
    length I) or period-terminated (when I is 0; extra periods are
    ignored).  Control words:
      - 'i' followed by a number sets the input length I (maximum 99;
        0 selects period-terminated mode).
      - 'o' followed by a number sets the output length O (maximum 99).
    Any other word is echoed, hyphen-padded or truncated to length O
    (verbatim when O is 0) and followed by a period.  I and O start at 1.
    When I changes, any buffered input is re-scanned under the new I.
    EOF flushes whatever is still buffered as a final word.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        # Each length is XORed with 1 so a freshly reset decoder
        # serializes with flags == 0.
        flags = (self.i ^ 1) * 100 + (self.o ^ 1)
        return bytes(self.buffer), flags

    def setstate(self, state):
        pending, flags = state
        self.buffer = bytearray(pending)
        hi, lo = divmod(flags, 100)
        self.i = hi ^ 1
        self.o = lo ^ 1

    def decode(self, input, final=False):
        pieces = []
        for byte in input:
            if self.i == 0:
                # Variable-length mode: a period ends the word; periods
                # arriving with an empty buffer are ignored.
                if byte == ord('.'):
                    if self.buffer:
                        pieces.append(self.process_word())
                else:
                    self.buffer.append(byte)
            else:
                # Fixed-length mode: emit after exactly self.i bytes.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    pieces.append(self.process_word())
        if final and self.buffer:
            # EOF terminates the last word.
            pieces.append(self.process_word())
        return ''.join(pieces)

    def process_word(self):
        word = self.buffer
        self.buffer = bytearray()
        if word[0] == ord('i'):
            self.i = min(99, int(word[1:] or 0))    # set input length
            return ''
        if word[0] == ord('o'):
            self.o = min(99, int(word[1:] or 0))    # set output length
            return ''
        text = word.decode('ascii')
        if len(text) < self.o:
            text += '-' * self.o                    # pad out with hyphens
        if self.o:
            text = text[:self.o]                    # truncate to output length
        return text + '.'

    codecEnabled = False

    @classmethod
    def lookupTestDecoder(cls, name):
        # Codec search function: answers only for 'test_decoder' and
        # only once a test has flipped codecEnabled on.
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
# (Import-time side effect: installs a codec search function; it returns
# None for every lookup until a test sets codecEnabled = True.)
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # (input bytes, final flag, expected decoded output)
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
         'a----------------------------.' +
         'b----------------------------.' +
         'cde--------------------------.' +
         'abcdefghijabcde.' +
         'a.b------------.' +
         '.c.------------.' +
         'd.e------------.' +
         'k--------------.' +
         'l--------------.' +
         'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)
        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
    # Mixed newline styles; `normalized` is what universal-newline
    # decoding of `testdata` should yield.
    self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
    self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
    support.unlink(support.TESTFN)

def tearDown(self):
    # Remove the scratch file even if a test failed mid-way.
    support.unlink(support.TESTFN)
def test_constructor(self):
    r = self.BytesIO(b"\xc3\xa9\n\n")
    b = self.BufferedReader(r, 1000)
    t = self.TextIOWrapper(b)
    # __init__ may be re-run with different encoding/newline settings.
    t.__init__(b, encoding="latin-1", newline="\r\n")
    self.assertEqual(t.encoding, "latin-1")
    self.assertEqual(t.line_buffering, False)
    t.__init__(b, encoding="utf-8", line_buffering=True)
    self.assertEqual(t.encoding, "utf-8")
    self.assertEqual(t.line_buffering, True)
    self.assertEqual("\xe9\n", t.readline())
    # newline must be None or one of the recognized strings
    self.assertRaises(TypeError, t.__init__, b, newline=42)
    self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')

def test_uninitialized(self):
    # A wrapper created via __new__ must fail cleanly until __init__ runs.
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    del t
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    self.assertRaises(Exception, repr, t)
    self.assertRaisesRegex((ValueError, AttributeError),
                           'uninitialized|has no attribute',
                           t.read, 0)
    t.__init__(self.MockRawIO())
    self.assertEqual(t.read(0), '')

def test_non_text_encoding_codecs_are_rejected(self):
    # Ensure the constructor complains if passed a codec that isn't
    # marked as a text encoding
    # http://bugs.python.org/issue20404
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    with self.assertRaisesRegex(LookupError, "is not a text encoding"):
        self.TextIOWrapper(b, encoding="hex")
@unittest.skip('TODO: RUSTPYTHON')
def test_detach(self):
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    t = self.TextIOWrapper(b)
    self.assertIs(t.detach(), b)
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("howdy")
    self.assertFalse(r.getvalue())
    # detach() must flush pending text to the buffer first
    t.detach()
    self.assertEqual(r.getvalue(), b"howdy")
    self.assertRaises(ValueError, t.detach)
    # Operations independent of the detached stream should still work
    repr(t)
    self.assertEqual(t.encoding, "ascii")
    self.assertEqual(t.errors, "strict")
    self.assertFalse(t.line_buffering)
    self.assertFalse(t.write_through)

def test_repr(self):
    # repr() should include name/mode/encoding as they become available.
    raw = self.BytesIO("hello".encode("utf-8"))
    b = self.BufferedReader(raw)
    t = self.TextIOWrapper(b, encoding="utf-8")
    modname = self.TextIOWrapper.__module__
    self.assertRegex(repr(t),
                     r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
    raw.name = "dummy"
    self.assertRegex(repr(t),
                     r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
    t.mode = "r"
    self.assertRegex(repr(t),
                     r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
    raw.name = b"dummy"
    self.assertRegex(repr(t),
                     r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
    t.buffer.detach()
    repr(t)  # Should not raise an exception

def test_recursive_repr(self):
    # Issue #25455: a name attribute that refers back to the wrapper
    # itself must not cause infinite recursion in repr().
    raw = self.BytesIO()
    t = self.TextIOWrapper(raw)
    with support.swap_attr(raw, 'name', t):
        try:
            repr(t)  # Should not crash
        except RuntimeError:
            pass
def test_line_buffering(self):
    # With line_buffering=True, a flush happens on '\n' or '\r'.
    r = self.BytesIO()
    b = self.BufferedWriter(r, 1000)
    t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
    t.write("X")
    self.assertEqual(r.getvalue(), b"")  # No flush happened
    t.write("Y\nZ")
    self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
    t.write("A\rB")
    self.assertEqual(r.getvalue(), b"XY\nZA\rB")

@unittest.skip('TODO: RUSTPYTHON')
def test_reconfigure_line_buffering(self):
    r = self.BytesIO()
    b = self.BufferedWriter(r, 1000)
    t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
    t.write("AB\nC")
    self.assertEqual(r.getvalue(), b"")
    t.reconfigure(line_buffering=True)  # implicit flush
    self.assertEqual(r.getvalue(), b"AB\nC")
    t.write("DEF\nG")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
    t.write("H")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
    t.reconfigure(line_buffering=False)  # implicit flush
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
    t.write("IJ")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
    # Keeping default value
    t.reconfigure()
    t.reconfigure(line_buffering=None)
    self.assertEqual(t.line_buffering, False)
    t.reconfigure(line_buffering=True)
    t.reconfigure()
    t.reconfigure(line_buffering=None)
    self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
    """With no encoding argument, TextIOWrapper uses the locale's
    preferred encoding (checked with LC_* / LANG removed)."""
    old_environ = dict(os.environ)
    try:
        # try to get a user preferred encoding different than the current
        # locale encoding to check that TextIOWrapper() uses the current
        # locale encoding and not the user preferred encoding
        for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
            if key in os.environ:
                del os.environ[key]

        current_locale_encoding = locale.getpreferredencoding(False)
        b = self.BytesIO()
        t = self.TextIOWrapper(b)
        self.assertEqual(t.encoding, current_locale_encoding)
    finally:
        os.environ.clear()
        os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
    """Issue 15989: a fileno() larger than INT_MAX/UINT_MAX must raise
    OverflowError rather than being silently truncated."""
    import _testcapi
    b = self.BytesIO()
    b.fileno = lambda: _testcapi.INT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
    b.fileno = lambda: _testcapi.UINT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
    """The encoding attribute is always set and names a real codec."""
    stream = self.BytesIO()
    explicit = self.TextIOWrapper(stream, encoding="utf-8")
    self.assertEqual(explicit.encoding, "utf-8")
    implicit = self.TextIOWrapper(stream)
    self.assertIsNotNone(implicit.encoding)
    codecs.lookup(implicit.encoding)
def test_encoding_errors_reading(self):
    """Decoding honours the errors= policy: default/strict raise,
    ignore drops the bad byte, replace substitutes U+FFFD."""
    def wrap(**kwargs):
        return self.TextIOWrapper(self.BytesIO(b"abc\n\xff\n"),
                                  encoding="ascii", **kwargs)
    # (1) default policy is strict
    self.assertRaises(UnicodeError, wrap().read)
    # (2) explicit strict
    self.assertRaises(UnicodeError, wrap(errors="strict").read)
    # (3) ignore
    self.assertEqual(wrap(errors="ignore").read(), "abc\n\n")
    # (4) replace
    self.assertEqual(wrap(errors="replace").read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
    """Encoding honours the errors= policy when writing."""
    # (1)+(2) default and explicit strict: unencodable char raises
    for kwargs in ({}, {"errors": "strict"}):
        t = self.TextIOWrapper(self.BytesIO(), encoding="ascii", **kwargs)
        self.assertRaises(UnicodeError, t.write, "\xff")
    # (3)+(4) ignore drops the character; replace writes '?'
    for errors, expected in (("ignore", b"abcdef\n"),
                             ("replace", b"abc?def\n")):
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors=errors,
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), expected)
def test_newlines(self):
    """Exhaustively check newline translation on input across encodings,
    buffer sizes, and both read(2)-and-readline and full iteration."""
    input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]

    # Each entry: [newline argument, expected decoded lines].
    tests = [
        [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
        [ '', input_lines ],
        [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
        [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
        [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
    ]
    encodings = (
        'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )

    # Try a range of buffer sizes to test the case where \r is the last
    # character in TextIOWrapper._pending_line.
    for encoding in encodings:
        # XXX: str.encode() should return bytes
        data = bytes(''.join(input_lines).encode(encoding))
        for do_reads in (False, True):
            for bufsize in range(1, 10):
                for newline, exp_lines in tests:
                    bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                    textio = self.TextIOWrapper(bufio, newline=newline,
                                                encoding=encoding)
                    if do_reads:
                        got_lines = []
                        while True:
                            c2 = textio.read(2)
                            if c2 == '':
                                break
                            self.assertEqual(len(c2), 2)
                            got_lines.append(c2 + textio.readline())
                    else:
                        got_lines = list(textio)

                    for got_line, exp_line in zip(got_lines, exp_lines):
                        self.assertEqual(got_line, exp_line)
                    self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
    """readlines()/read() split input per the newline= argument,
    including data containing NUL bytes."""
    testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
    normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
    for newline, expected in [
        (None, normalized.decode("ascii").splitlines(keepends=True)),
        ("", testdata.decode("ascii").splitlines(keepends=True)),
        ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
        ]:
        buf = self.BytesIO(testdata)
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        self.assertEqual(txt.readlines(), expected)
        txt.seek(0)
        self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
    """On output, '\\n' is translated per newline=; newline=None uses
    os.linesep; '' and '\\n' write verbatim."""
    testdict = {
        "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
        "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
        }
    tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
    for newline, expected in tests:
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        txt.write("AAA\nB")
        txt.write("BB\nCCC\n")
        txt.write("X\rY\r\nZ")
        txt.flush()
        self.assertEqual(buf.closed, False)
        self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
    """Dropping the last reference flushes pending text and closes the
    underlying buffer."""
    l = []
    base = self.BytesIO
    class MyBytesIO(base):
        def close(self):
            # Record the buffer contents at close time.
            l.append(self.getvalue())
            base.close(self)
    b = MyBytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("abc")
    del t
    support.gc_collect()
    self.assertEqual([b"abc"], l)
def test_override_destructor(self):
    """A subclass __del__ runs first and still triggers close() and
    flush() (recorded order: 1=__del__, 2=close, 3=flush)."""
    record = []
    class MyTextIO(self.TextIOWrapper):
        def __del__(self):
            record.append(1)
            try:
                # Base __del__ may not exist on all implementations.
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    b = self.BytesIO()
    t = MyTextIO(b, encoding="ascii")
    del t
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_error_through_destructor(self):
    # Test that the exception state is not modified by a destructor,
    # even if close() fails.
    rawio = self.CloseFailureIO()
    with support.catch_unraisable_exception() as cm:
        with self.assertRaises(AttributeError):
            self.TextIOWrapper(rawio).xyzzy

        if not IOBASE_EMITS_UNRAISABLE:
            self.assertIsNone(cm.unraisable)
        elif cm.unraisable is not None:
            # The failing close() surfaces as an unraisable OSError.
            self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API

def test_basic_io(self):
    """Round-trip write/read/seek/tell across chunk sizes and encodings."""
    for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
        for enc in "ascii", "latin-1", "utf-8" :  # , "utf-16-be", "utf-16-le":
            f = self.open(support.TESTFN, "w+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.write("abc"), 3)
            f.close()
            f = self.open(support.TESTFN, "r+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), "abc")
            cookie = f.tell()
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.read(None), "abc")
            f.seek(0)
            self.assertEqual(f.read(2), "ab")
            self.assertEqual(f.read(1), "c")
            self.assertEqual(f.read(1), "")
            self.assertEqual(f.read(), "")
            self.assertEqual(f.tell(), cookie)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.seek(0, 2), cookie)
            self.assertEqual(f.write("def"), 3)
            self.assertEqual(f.seek(cookie), cookie)
            self.assertEqual(f.read(), "def")
            if enc.startswith("utf"):
                self.multi_line_test(f, enc)
            f.close()
def multi_line_test(self, f, enc):
    """Helper for test_basic_io: write lines of varying lengths of
    multi-byte characters, then verify readline() and tell() agree."""
    f.seek(0)
    f.truncate()
    sample = "s\xff\u0fff\uffff"
    wlines = []
    for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
        chars = []
        for i in range(size):
            chars.append(sample[i % len(sample)])
        line = "".join(chars) + "\n"
        wlines.append((f.tell(), line))
        f.write(line)
    f.seek(0)
    rlines = []
    while True:
        pos = f.tell()
        line = f.readline()
        if not line:
            break
        rlines.append((pos, line))
    self.assertEqual(rlines, wlines)
def test_telling(self):
    """tell() matches positions recorded during writing; calling tell()
    while iterating the file raises OSError."""
    f = self.open(support.TESTFN, "w+", encoding="utf-8")
    p0 = f.tell()
    f.write("\xff\n")
    p1 = f.tell()
    f.write("\xff\n")
    p2 = f.tell()
    f.seek(0)
    self.assertEqual(f.tell(), p0)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p1)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p2)
    f.seek(0)
    for line in f:
        self.assertEqual(line, "\xff\n")
        # tell() is not allowed during next()-driven iteration.
        self.assertRaises(OSError, f.tell)
    self.assertEqual(f.tell(), p2)
    f.close()
def test_seeking(self):
    """Reading exactly up to a chunk boundary leaves tell()/readline()
    consistent when a multi-byte character straddles the boundary."""
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    self.assertEqual(len(u_prefix), len(prefix))
    u_suffix = "\u8888\n"
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    with self.open(support.TESTFN, "wb") as f:
        f.write(line*2)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    # Regression test for a specific bug: tell() after readline() with a
    # tiny chunk size and a multi-byte character must not fail.
    data = b'\xe0\xbf\xbf\n'
    with self.open(support.TESTFN, "wb") as f:
        f.write(data)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        f._CHUNK_SIZE  # Just test that it exists
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
def test_seek_and_tell(self):
    """Test seek/tell using the StatefulIncrementalDecoder."""
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128

    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()

        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()

    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1

    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)

        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)

    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_multibyte_seek_and_tell(self):
    """seek() back to a tell() cookie works in a stateful multi-byte
    encoding (euc_jp)."""
    f = self.open(support.TESTFN, "w", encoding="euc_jp")
    f.write("AB\n\u3046\u3048\n")
    f.close()

    f = self.open(support.TESTFN, "r", encoding="euc_jp")
    self.assertEqual(f.readline(), "AB\n")
    p0 = f.tell()
    self.assertEqual(f.readline(), "\u3046\u3048\n")
    p1 = f.tell()
    f.seek(p0)
    self.assertEqual(f.readline(), "\u3046\u3048\n")
    self.assertEqual(f.tell(), p1)
    f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_with_encoder_state(self):
    """Seeking restores the encoder state of a stateful codec
    (euc_jis_2004) so combining characters re-encode correctly."""
    f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
    f.write("\u00e6\u0300")
    p0 = f.tell()
    f.write("\u00e6")
    f.seek(p0)
    f.write("\u0300")
    f.close()

    f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
    self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
    f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoded_writes(self):
    """BOM-emitting encodings write the BOM only once (issue1753)."""
    data = "1234567890"
    tests = ("utf-16",
             "utf-16-le",
             "utf-16-be",
             "utf-32",
             "utf-32-le",
             "utf-32-be")
    for encoding in tests:
        buf = self.BytesIO()
        f = self.TextIOWrapper(buf, encoding=encoding)
        # Check if the BOM is written only once (see issue1753).
        f.write(data)
        f.write(data)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
    """read() over a non-readable stream raises OSError."""
    class Unreadable(self.BytesIO):
        def readable(self):
            return False
    wrapper = self.TextIOWrapper(Unreadable())
    self.assertRaises(OSError, wrapper.read)
def test_read_one_by_one(self):
    """Single-character reads still apply universal-newline translation."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
    pieces = []
    # iter() with a sentinel stops on the empty string at EOF.
    for ch in iter(lambda: txt.read(1), ''):
        pieces.append(ch)
    self.assertEqual(''.join(pieces), "AA\nBB")
def test_readlines(self):
    """readlines() honours an optional (None or int) size hint."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
    all_lines = ["AA\n", "BB\n", "CC"]
    self.assertEqual(txt.readlines(), all_lines)
    txt.seek(0)
    self.assertEqual(txt.readlines(None), all_lines)
    txt.seek(0)
    # A hint of 5 stops after the line that reaches the limit.
    self.assertEqual(txt.readlines(5), all_lines[:2])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
    # make sure "\r\n" straddles 128 char boundary.
    txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
    reads = ""
    while True:
        c = txt.read(128)
        if not c:
            break
        reads += c
    # The split "\r\n" must still collapse to a single "\n".
    self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
    """writelines() concatenates items without adding separators."""
    sink = self.BytesIO()
    txt = self.TextIOWrapper(sink)
    txt.writelines(['ab', 'cd', 'ef'])
    txt.flush()
    self.assertEqual(sink.getvalue(), b'abcdef')
def test_writelines_userlist(self):
    """writelines() accepts arbitrary sequence types such as UserList."""
    sink = self.BytesIO()
    txt = self.TextIOWrapper(sink)
    txt.writelines(UserList(['ab', 'cd', 'ef']))
    txt.flush()
    self.assertEqual(sink.getvalue(), b'abcdef')
def test_writelines_error(self):
    """writelines() rejects non-str items and non-iterable arguments."""
    txt = self.TextIOWrapper(self.BytesIO())
    for bad in ([1, 2, 3], None, b'abc'):
        self.assertRaises(TypeError, txt.writelines, bad)
def test_issue1395_1(self):
    """One-character reads reproduce the newline-normalized test data."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    pieces = []
    # read one char at a time until EOF (empty string sentinel)
    for ch in iter(lambda: txt.read(1), ''):
        pieces.append(ch)
    self.assertEqual(''.join(pieces), self.normalized)
def test_issue1395_2(self):
    """4-char reads with a tiny chunk size reproduce the normalized data."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = ""
    while True:
        c = txt.read(4)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
    """Mixing read(4) and readline() with a tiny chunk size is consistent."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    reads += txt.read(4)
    reads += txt.readline()
    reads += txt.readline()
    reads += txt.readline()
    self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
    """A partial read(4) followed by read() returns the remaining data."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    reads += txt.read()
    self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
    """seek() round-trip through a tell() cookie resumes reading correctly."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    pos = txt.tell()
    txt.seek(0)
    txt.seek(pos)
    self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
    """The wrapper reports the same seekability as its buffer."""
    raw = self.BytesIO(self.testdata)
    wrapper = self.TextIOWrapper(raw, encoding="ascii")
    self.assertEqual(raw.seekable(), wrapper.seekable())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_append_bom(self):
    # The BOM is not written again when appending to a non-empty file
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaa'.encode(charset))

        with self.open(filename, 'a', encoding=charset) as f:
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            # Exactly one BOM: appended bytes carry no second BOM.
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_bom(self):
    # Same test, but when seeking manually
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'r+', encoding=charset) as f:
            f.seek(pos)
            f.write('zzz')
            # Overwriting from position 0 must not duplicate the BOM.
            f.seek(0)
            f.write('bbb')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_append_bom(self):
    # Same test, but first seek to the start and then to the end
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
        with self.open(filename, 'a', encoding=charset) as f:
            # Seeking around in append mode must not re-emit the BOM.
            f.seek(0)
            f.seek(0, self.SEEK_END)
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
    """errors defaults to 'strict' and reflects an explicit policy."""
    for kwargs, expected in (({}, "strict"),
                             ({"errors": "replace"}, "replace")):
        with self.open(support.TESTFN, "w", **kwargs) as f:
            self.assertEqual(f.errors, expected)
@support.no_tracing
def test_threads_write(self):
    # Issue6750: concurrent writes could duplicate data
    event = threading.Event()
    with self.open(support.TESTFN, "w", buffering=1) as f:
        def run(n):
            text = "Thread%03d\n" % n
            # Start all threads writing at (roughly) the same moment.
            event.wait()
            f.write(text)
        threads = [threading.Thread(target=run, args=(x,))
                   for x in range(20)]
        with support.start_threads(threads, event.set):
            time.sleep(0.02)
    with self.open(support.TESTFN) as f:
        content = f.read()
        for n in range(20):
            # Each thread's line appears exactly once — no duplication.
            self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
    # Test that text file is closed despite failed flush
    # and that flush() is called before file closed.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    closed = []
    def bad_flush():
        # Record open/closed state at flush time, then fail.
        closed[:] = [txt.closed, txt.buffer.closed]
        raise OSError()
    txt.flush = bad_flush
    self.assertRaises(OSError, txt.close) # exception not swallowed
    self.assertTrue(txt.closed)
    self.assertTrue(txt.buffer.closed)
    self.assertTrue(closed)      # flush() called
    self.assertFalse(closed[0])  # flush() called before file closed
    self.assertFalse(closed[1])
    txt.flush = lambda: None  # break reference loop
def test_close_error_on_close(self):
    """When both flush() and close() fail, close() raises its own error
    with the flush error chained as __context__."""
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise OSError('flush')
    def bad_close():
        raise OSError('close')
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(OSError) as err: # exception not swallowed
        txt.close()
    self.assertEqual(err.exception.args, ('close',))
    self.assertIsInstance(err.exception.__context__, OSError)
    self.assertEqual(err.exception.__context__.args, ('flush',))
    self.assertFalse(txt.closed)

    # Silence destructor error
    buffer.close = lambda: None
    txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
    # Issue #21677: non-normalized exceptions (raised by name lookup
    # failure here) must chain the same way as normalized ones.
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise non_existing_flush
    def bad_close():
        raise non_existing_close
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(NameError) as err: # exception not swallowed
        txt.close()
    self.assertIn('non_existing_close', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('non_existing_flush', str(err.exception.__context__))
    self.assertFalse(txt.closed)

    # Silence destructor error
    buffer.close = lambda: None
    txt.flush = lambda: None
def test_multi_close(self):
    """close() is idempotent, but flush() on a closed file raises."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    for _ in range(3):
        txt.close()
    self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
    """tell() and seek() on an unseekable stream raise UnsupportedOperation."""
    wrapper = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
    for op, args in ((wrapper.tell, ()), (wrapper.seek, (0,))):
        self.assertRaises(self.UnsupportedOperation, op, *args)
def test_readonly_attributes(self):
    """The buffer attribute cannot be rebound after construction."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    replacement = self.BytesIO(self.testdata)
    with self.assertRaises(AttributeError):
        txt.buffer = replacement
def test_rawio(self):
    # Issue #12591: TextIOWrapper must work with raw I/O objects, so
    # that subprocess.Popen() can have the required unbuffered
    # semantics with universal_newlines=True.
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    # Reads
    self.assertEqual(txt.read(4), 'abcd')
    self.assertEqual(txt.readline(), 'efghi\n')
    self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
    # Issue #12591: with write_through=True, writes don't need a flush
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                             write_through=True)
    txt.write('1')
    txt.write('23\n4')
    txt.write('5')
    # All writes reached the raw object without an explicit flush().
    self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
    # Issue #21396: write_through=True doesn't force a flush()
    # on the underlying binary buffered object.
    flush_called, write_called = [], []
    class BufferedWriter(self.BufferedWriter):
        def flush(self, *args, **kwargs):
            flush_called.append(True)
            return super().flush(*args, **kwargs)
        def write(self, *args, **kwargs):
            write_called.append(True)
            return super().write(*args, **kwargs)

    rawio = self.BytesIO()
    data = b"a"
    bufio = BufferedWriter(rawio, len(data)*2)
    textio = self.TextIOWrapper(bufio, encoding='ascii',
                                write_through=True)
    # write to the buffered io but don't overflow the buffer
    text = data.decode('ascii')
    textio.write(text)

    # buffer.flush is not called with write_through=True
    self.assertFalse(flush_called)
    # buffer.write *is* called with write_through=True
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), b"") # no flush

    write_called = [] # reset
    textio.write(text * 10) # total content is larger than bufio buffer
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
    """reconfigure(write_through=...) takes effect with an implicit
    flush; None (or omitting it) keeps the current setting."""
    raw = self.MockRawIO([])
    t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    t.write('1')
    t.reconfigure(write_through=True)  # implied flush
    self.assertEqual(t.write_through, True)
    self.assertEqual(b''.join(raw._write_stack), b'1')
    t.write('23')
    self.assertEqual(b''.join(raw._write_stack), b'123')
    t.reconfigure(write_through=False)
    self.assertEqual(t.write_through, False)
    t.write('45')
    t.flush()
    self.assertEqual(b''.join(raw._write_stack), b'12345')
    # Keeping default value
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, False)
    t.reconfigure(write_through=True)
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
    """Issue #17106: an underlying read() returning non-bytes must
    raise TypeError, not crash."""
    for reader in (lambda t: t.read(1),
                   lambda t: t.readline(),
                   lambda t: t.read()):
        t = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, reader, t)
def test_illegal_encoder(self):
    # Issue 31271: Calling write() while the return value of encoder's
    # encode() is invalid shouldn't cause an assertion failure.
    rot13 = codecs.lookup("rot13")
    # Temporarily pretend rot13 is a text encoding to get past the
    # constructor's codec check.
    with support.swap_attr(rot13, '_is_text_encoding', True):
        t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
    self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
    # Issue #17106
    # Bypass the early encoding check added in issue 20404
    def _make_illegal_wrapper():
        quopri = codecs.lookup("quopri")
        quopri._is_text_encoding = True
        try:
            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                   newline='\n', encoding="quopri")
        finally:
            quopri._is_text_encoding = False
        return t
    # Crash when decoder returns non-string
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read, 1)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.readline)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read)

    # Issue 31243: calling read() while the return value of decoder's
    # getstate() is invalid should neither crash the interpreter nor
    # raise a SystemError.
    def _make_very_illegal_wrapper(getstate_ret_val):
        class BadDecoder:
            def getstate(self):
                return getstate_ret_val
        def _get_bad_decoder(dummy):
            return BadDecoder()
        quopri = codecs.lookup("quopri")
        with support.swap_attr(quopri, 'incrementaldecoder',
                               _get_bad_decoder):
            return _make_illegal_wrapper()
    t = _make_very_illegal_wrapper(42)
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper(())
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper((1, 2))
    self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
    # Issue #20037: creating a TextIOWrapper at shutdown
    # shouldn't crash the interpreter.  Returns the result of
    # assert_python_ok() running the snippet in a subprocess.
    iomod = self.io.__name__
    code = """if 1:
        import codecs
        import {iomod} as io
        # Avoid looking up codecs at shutdown
        codecs.lookup('utf-8')
        class C:
            def __init__(self):
                self.buf = io.BytesIO()
            def __del__(self):
                io.TextIOWrapper(self.buf, **{kwargs})
                print("ok")
        c = C()
        """.format(iomod=iomod, kwargs=kwargs)
    return assert_python_ok("-c", code)
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_without_encoding(self):
    """Creating a TextIOWrapper during shutdown either works or fails
    with the known module-state error — never a crash."""
    rc, out, err = self._check_create_at_shutdown()
    if err:
        # Can error out with a RuntimeError if the module state
        # isn't found.
        self.assertIn(self.shutdown_error, err.decode())
    else:
        self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_with_encoding(self):
    """With an explicit encoding, shutdown-time creation must succeed."""
    rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                  errors='strict')
    self.assertFalse(err)
    self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
    """The decoder accepts buffer objects (memoryview), not just bytes."""
    r = MemviewBytesIO(b'Just some random string\n')
    t = self.TextIOWrapper(r, 'utf-8')

    # TextIOwrapper will not read the full string, because
    # we truncate it to a multiple of the native int size
    # so that we can construct a more complex memoryview.
    bytes_val =  _to_memoryview(r.getvalue()).tobytes()

    self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
    """Constructing on an object without tell() may fail repeatedly,
    but must succeed (without crashing) once tell() exists."""
    class F(object):
        def readable(self): return True
        def writable(self): return True
        def seekable(self): return True

    for i in range(10):
        try:
            self.TextIOWrapper(F(), encoding='utf-8')
        except Exception:
            pass

    F.tell = lambda x: 0
    # Result intentionally unused: only checking construction succeeds.
    t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
    # latin1 -> utf8
    # (latin1 can decode utf-8 encoded string)
    data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
    raw = self.BytesIO(data)
    txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
    self.assertEqual(txt.readline(), 'abc\xe9\n')
    # Changing encoding or newline after reading has begun is refused.
    with self.assertRaises(self.UnsupportedOperation):
        txt.reconfigure(encoding='utf-8')
    with self.assertRaises(self.UnsupportedOperation):
        txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
    # ascii has a specific encodefunc in the C implementation,
    # but utf-8-sig has not. Make sure that we get rid of the
    # cached encodefunc when we switch encoders.
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('foo\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('\xe9\n')
    txt.flush()
    self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
    # latin -> utf8
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
    txt.write('abc\xe9\n')
    txt.reconfigure(encoding='utf-8')
    # reconfigure() flushed the latin1-encoded text first.
    self.assertEqual(raw.getvalue(), b'abc\xe9\n')
    txt.write('d\xe9f\n')
    txt.flush()
    self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')

    # ascii -> utf-8-sig: ensure that no BOM is written in the middle of
    # the file
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('abc\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('d\xe9f\n')
    txt.flush()
    self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
    """On a non-seekable stream the wrapper cannot check the position,
    so switching to utf-8-sig mid-file does emit a BOM."""
    raw = self.BytesIO()
    raw.seekable = lambda: False
    raw.seek = None
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('abc\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('d\xe9f\n')
    txt.flush()

    # If the raw stream is not seekable, there'll be a BOM
    self.assertEqual(raw.getvalue(),  b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
    """Omitted/None reconfigure() args keep current settings, except
    that changing the encoding resets errors to 'strict'."""
    txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
    txt.reconfigure(encoding=None)
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'replace')
    txt.write('LF\n')

    txt.reconfigure(newline='\r\n')
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'replace')

    txt.reconfigure(errors='ignore')
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'ignore')
    txt.write('CRLF\n')

    txt.reconfigure(encoding='utf-8', newline=None)
    # A new encoding without an explicit errors= resets to strict.
    self.assertEqual(txt.errors, 'strict')
    txt.seek(0)
    self.assertEqual(txt.read(), 'LF\nCRLF\n')

    self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
    """reconfigure(newline=...) changes translation for subsequent
    reads and writes."""
    raw = self.BytesIO(b'CR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline=None)
    self.assertEqual(txt.readline(), 'CR\n')
    raw = self.BytesIO(b'CR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline='')
    self.assertEqual(txt.readline(), 'CR\r')
    raw = self.BytesIO(b'CR\rLF\nEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
    txt.reconfigure(newline='\n')
    self.assertEqual(txt.readline(), 'CR\rLF\n')
    raw = self.BytesIO(b'LF\nCR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline='\r')
    self.assertEqual(txt.readline(), 'LF\nCR\r')
    raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
    txt.reconfigure(newline='\r\n')
    self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')

    txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
    txt.reconfigure(newline=None)
    txt.write('linesep\n')
    txt.reconfigure(newline='')
    txt.write('LF\n')
    txt.reconfigure(newline='\n')
    txt.write('LF\n')
    txt.reconfigure(newline='\r')
    txt.write('CR\n')
    txt.reconfigure(newline='\r\n')
    txt.write('CRLF\n')
    expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
    self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
    # Assertion failures occurred in tell() after read() and write().
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.read()
    t.tell()
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.write('x')
    t.tell()
class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
    rather than bytes'''

    def read1(self, len_):
        # Delegate to BytesIO, then wrap the result in a memoryview.
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
    """Return *buf* as a non-trivial memoryview (viewed as machine ints).

    Trailing bytes that do not fill a whole array item are discarded.
    """
    ints = array.array('i')
    whole_items = len(buf) // ints.itemsize
    ints.frombytes(buf[:whole_items * ints.itemsize])
    return memoryview(ints)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_constructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_encoding_read(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_reconfigure_encoding_read()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_basic_io(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_basic_io()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_telling(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_telling()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_uninitialized(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_uninitialized()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_non_text_encoding_codecs_are_rejected(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_non_text_encoding_codecs_are_rejected()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_newlines()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines_input(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_newlines_input()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_one_by_one(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_read_one_by_one()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_by_chunk(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_read_by_chunk()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_1(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_issue1395_1()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_2(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_issue1395_2()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_3(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_issue1395_3()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_4(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_issue1395_4()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_5(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_issue1395_5()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_through(self):
    # Inherited test known to fail on RustPython; kept for tracking.
    super().test_reconfigure_write_through()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_fromascii(self):
super().test_reconfigure_write_fromascii()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write(self):
super().test_reconfigure_write()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_non_seekable(self):
super().test_reconfigure_write_non_seekable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_defaults(self):
super().test_reconfigure_defaults()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_newline(self):
super().test_reconfigure_newline()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
    """Run the TextIOWrapper tests against the pure-Python (_pyio) implementation."""
    io = pyio
    shutdown_error = "LookupError: unknown encoding: ascii"
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_newlines(self):
        super().test_newlines()
    # The following overrides simply run the inherited tests unchanged.
    def test_line_buffering(self):
        super().test_line_buffering()
    def test_seeking_too(self):
        super().test_seeking_too()
    def test_bufio_write_through(self):
        super().test_bufio_write_through()
    def test_seeking(self):
        super().test_seeking()
@unittest.skip("TODO: RUSTPYTHON, incremental decoder")
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for IncrementalNewlineDecoder (universal-newline translation).

    Subclasses bind self.IncrementalNewlineDecoder to either the C or the
    pure-Python implementation (wired up in load_tests()).
    """
    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)
        # A multi-byte sequence fed one byte at a time only yields output
        # once the character is complete.
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        # Truncated sequence at end of input must raise when final=True.
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
        decoder.reset()
        _check_decode(b'\n', "\n")
        # A lone '\r' is held back until we know whether a '\n' follows.
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)
        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")
        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")
        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")
    def check_newline_decoding(self, decoder, encoding):
        # Generic newline tests; *encoding* of None means the decoder is fed
        # str input, otherwise input is encoded and fed byte-by-byte.
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        # decoder.newlines accumulates the kinds of line endings seen so far.
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)
    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
        self.assertRaises(TypeError, decoder.setstate, 42)
    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)
    def test_translate(self):
        # issue 35062
        for translate in (-2, -1, 1, 2):
            # Any non-zero translate value must enable newline translation.
            decoder = codecs.getincrementaldecoder("utf-8")()
            decoder = self.IncrementalNewlineDecoder(decoder, translate)
            self.check_newline_decoding_utf8(decoder)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
        self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    """Run the newline-decoder tests against the C (_io) implementation."""
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    """Run the newline-decoder tests against the pure-Python (_pyio) implementation."""
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Assorted io-module tests not tied to one particular stream class.

    Subclasses bind self.io / self.open etc. to either the C or the
    pure-Python implementation (wired up in load_tests()).
    """
    def tearDown(self):
        support.unlink(support.TESTFN)
    def test___all__(self):
        # Every name in io.__all__ must exist and be of the expected kind.
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name in ("open", "open_code"):
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))
    def test_attributes(self):
        # mode / name attributes on the raw, buffered and text layers.
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()
        # XXX RUSTPYTHON: universal mode is deprecated anyway, so I
        # feel fine about skipping it
        # with support.check_warnings(('', DeprecationWarning)):
        # f = self.open(support.TESTFN, "U")
        # self.assertEqual(f.name, support.TESTFN)
        # self.assertEqual(f.buffer.name, support.TESTFN)
        # self.assertEqual(f.buffer.raw.name, support.TESTFN)
        # self.assertEqual(f.mode, "U")
        # self.assertEqual(f.buffer.mode, "rb")
        # self.assertEqual(f.buffer.raw.mode, "rb")
        # f.close()
        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()
    @unittest.skip("TODO: RUSTPYTHON, check if fd is seekable fileio")
    def test_open_pipe_with_append(self):
        # bpo-27805: Ignore ESPIPE from lseek() in open().
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        f = self.open(w, 'a')
        self.addCleanup(f.close)
        # Check that the file is marked non-seekable. On Windows, however, lseek
        # somehow succeeds on pipes.
        if sys.platform != 'win32':
            self.assertFalse(f.seekable())
    def test_io_after_close(self):
        # Every operation on a closed file must raise ValueError, for all
        # combinations of mode and buffering.
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
                self.assertRaises(ValueError, f.read1)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            if hasattr(f, "readinto1"):
                self.assertRaises(ValueError, f.readinto1, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.readlines, 1)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)
    # TODO: RUSTPYTHON, cyclic gc
    @unittest.expectedFailure
    def test_blockingioerror(self):
        # Various BlockingIOError issues
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)
    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
    def _check_abc_inheritance(self, abcmodule):
        # Raw, buffered and text streams must each be instances of exactly
        # the expected ABCs from *abcmodule*.
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)
    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)
    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)
    def _check_warn_on_dealloc(self, *args, **kwargs):
        # Deallocating an unclosed file must emit a ResourceWarning that
        # names the file (its repr).
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")
    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with support.check_no_resource_warning(self):
            open(r, *args, closefd=False, **kwargs)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")
    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)
    @unittest.skipUnless(hasattr(os, 'set_blocking'),
                         'os.set_blocking() required for this test')
    def _test_nonblock_pipe_write(self, bufsize):
        # Writes through a non-blocking pipe must raise BlockingIOError with
        # an accurate characters_written, and nothing may be lost.
        sent = []
        received = []
        r, w = os.pipe()
        os.set_blocking(r, False)
        os.set_blocking(w, False)
        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes. For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)
            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())
            received += iter(rf.read, None)
        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)
    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())
    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
    """MiscIOTest against the C (_io) implementation, plus C-only checks."""
    io = io
    def test_readinto_buffer_overflow(self):
        # Issue #18025
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                # Deliberately returns far more data than requested.
                return b'x' * 10**6
        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)
    def check_daemon_threads_shutdown_deadlock(self, stream_name):
        # Issue #23309: deadlocks at shutdown should be avoided when a
        # daemon thread and the main thread both write to a file.
        code = """if 1:
            import sys
            import time
            import threading
            from test.support import SuppressCrashReport
            file = sys.{stream_name}
            def run():
                while True:
                    file.write('.')
                    file.flush()
            crash = SuppressCrashReport()
            crash.__enter__()
            # don't call __exit__(): the crash occurs at Python shutdown
            thread = threading.Thread(target=run)
            thread.daemon = True
            thread.start()
            time.sleep(0.5)
            file.write('!')
            file.flush()
            """.format_map(locals())
        res, _ = run_python_until_end("-c", code)
        err = res.err.decode()
        if res.rc != 0:
            # Failure: should be a fatal error
            pattern = (r"Fatal Python error: could not acquire lock "
                       r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
                       r"at interpreter shutdown, possibly due to "
                       r"daemon threads".format_map(locals()))
            self.assertRegex(err, pattern)
        else:
            self.assertFalse(err.strip('.!'))
    def test_daemon_threads_shutdown_stdout_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stdout')
    def test_daemon_threads_shutdown_stderr_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
    """MiscIOTest against the pure-Python (_pyio) implementation."""
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Check SIGALRM delivery during blocking buffered reads and writes.

    setUp installs alarm_interrupt (which raises ZeroDivisionError) as the
    SIGALRM handler; individual checks arm alarms around blocking I/O on
    pipes to provoke partial results and EINTR.
    """
    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)
    def alarm_interrupt(self, sig, frame):
        # Raise ZeroDivisionError from inside the signal handler.
        1/0
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            if hasattr(signal, 'pthread_sigmask'):
                # create the thread with SIGALRM signal blocked
                signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
                t.start()
                signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
            else:
                t.start()
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above. Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            signal.alarm(1)
            try:
                self.assertRaises(ZeroDivisionError, wio.write, large_data)
            finally:
                signal.alarm(0)
                t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise
    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")
    @unittest.skip("TODO: RUSTPYTHON, hangs?")
    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
    @support.no_tracing
    def check_reentrant_write(self, data, **fdopen_kwargs):
        # A write() re-entered from a signal handler must either raise
        # RuntimeError ("reentrant call") or propagate the handler's error.
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            signal.alarm(0)
            wio.close()
            os.close(r)
    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")
    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")
    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            signal.alarm(0)
            rio.close()
            os.close(w)
            os.close(r)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_interrupted_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_interrupted_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")
    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")
        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # We need a separate thread to read from the pipe and allow the
        # write() to finish. This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        error = None
        def _read():
            try:
                while not write_finished:
                    while r in select.select([r], [], [], 1.0)[0]:
                        s = os.read(r, 1024)
                        read_results.append(s)
            except BaseException as exc:
                nonlocal error
                error = exc
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            # First alarm: re-arm for alarm2 (which starts the reader).
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()
        large_data = item * N
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            written = wio.write(large_data)
            self.assertEqual(N, written)
            wio.flush()
            write_finished = True
            t.join()
            self.assertIsNone(error)
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            signal.alarm(0)
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise
    @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
    def test_interrupted_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")
    @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
    def test_interrupted_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    """Signal tests against the C (_io) implementation."""
    io = io
class PySignalsTest(SignalsTest):
    """Signal tests against the pure-Python (_pyio) implementation."""
    io = pyio
    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def load_tests(*args):
    """Build the test suite, injecting the matching io namespace (C or
    pure-Python) plus mock classes into each test class's __dict__."""
    test_classes = (CIOTest, PyIOTest, APIMismatchTest,
                    CBufferedReaderTest, PyBufferedReaderTest,
                    CBufferedWriterTest, PyBufferedWriterTest,
                    CBufferedRWPairTest, PyBufferedRWPairTest,
                    CBufferedRandomTest, PyBufferedRandomTest,
                    StatefulIncrementalDecoderTest,
                    CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
                    CTextIOWrapperTest, PyTextIOWrapperTest,
                    CMiscIOTest, PyMiscIOTest,
                    CSignalsTest, PySignalsTest,
                    )
    mock_classes = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
                    MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
                    SlowFlushRawIO)
    all_members = io.__all__# + ["IncrementalNewlineDecoder"] XXX RUSTPYTHON
    module_globals = globals()
    # Build one namespace per implementation: the io module's public names
    # plus the prefixed mock classes defined in this module.
    namespaces = {}
    for prefix, module in (("C", io), ("Py", pyio)):
        ns = {name: getattr(module, name) for name in all_members}
        for mock in mock_classes:
            ns[mock.__name__] = module_globals[prefix + mock.__name__]
        namespaces[prefix] = ns
    # Avoid turning open into a bound method.
    namespaces["Py"]["open"] = pyio.OpenWrapper
    for test in test_classes:
        for prefix, ns in namespaces.items():
            if test.__name__.startswith(prefix):
                for name, obj in ns.items():
                    setattr(test, name, obj)
    return unittest.TestSuite(unittest.makeSuite(test) for test in test_classes)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
    from labscript_utils import check_version
except ImportError:
    raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
# Imports below are interleaved with splash updates so the user sees
# progress during the (slow) startup; do not reorder them.
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.1.0', '3.0.0')
splash.update_text('importing zprocess')
import zprocess.locking
from zprocess import ZMQServer
splash.update_text('importing labscript suite modules')
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
                                      get_dataframe_from_shot,
                                      replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.outputbox import OutputBox
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
# Python 2/3 compatibility: use unicode strings and the renamed queue module.
if PY2:
    str = unicode
    import Queue as queue
else:
    import queue
from lyse import LYSE_DIR
# Set a meaningful name for zprocess.locking's client id:
zprocess.locking.set_client_process_name('lyse')
def set_win_appusermodel(window_id):
    """Register this window's Windows taskbar identity, icon and relaunch
    command via labscript_utils.winshell."""
    from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
    exe = sys.executable.lower()
    if not exe.endswith('w.exe'):
        # Relaunch with the windowed (console-free) interpreter.
        exe = exe.replace('.exe', 'w.exe')
    icon = os.path.join(LYSE_DIR, 'lyse.ico')
    relaunch = exe + ' ' + os.path.join(LYSE_DIR, '__main__.py')
    set_appusermodel(window_id, appids['lyse'], icon, relaunch, app_descriptions['lyse'])
@inmain_decorator()
def error_dialog(message):
    """Show a modal warning dialog with *message* (runs in the Qt main thread)."""
    QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
    """Ask a yes/no question via a modal dialog; return True if Yes was chosen."""
    reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
    return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
    """Return a unicode string of the float *x* in scientific notation.

    Args:
        x: the float to format; non-floats raise TypeError, and NaN/inf
            are returned via str().
        sigfigs: total number of significant figures to keep.
        mode: 'eng' uses an SI prefix where one exists (falling back to
            exponential form); 'exponential' always renders a superscript
            power of ten. Exponents are restricted to multiples of 3.
    """
    times = u'\u00d7'
    thinspace = u'\u2009'
    hairspace = u'\u200a'
    # Superscript characters used to render the exponent digits.
    sups = {u'-': u'\u207b',
            u'0': u'\u2070',
            u'1': u'\xb9',
            u'2': u'\xb2',
            u'3': u'\xb3',
            u'4': u'\u2074',
            u'5': u'\u2075',
            u'6': u'\u2076',
            u'7': u'\u2077',
            u'8': u'\u2078',
            u'9': u'\u2079'}
    # SI prefixes keyed by power-of-ten exponent.
    prefixes = {
        -24: u"y",
        -21: u"z",
        -18: u"a",
        -15: u"f",
        -12: u"p",
        -9: u"n",
        -6: u"\u03bc",
        -3: u"m",
        0: u"",
        3: u"k",
        6: u"M",
        9: u"G",
        12: u"T",
        15: u"P",
        18: u"E",
        21: u"Z",
        24: u"Y"
    }
    if not isinstance(x, float):
        raise TypeError('x must be floating point number')
    if np.isnan(x) or np.isinf(x):
        return str(x)
    if x != 0:
        exponent = int(np.floor(np.log10(np.abs(x))))
        # Only multiples of 10^3
        exponent = int(np.floor(exponent / 3) * 3)
    else:
        exponent = 0
    significand = x / 10 ** exponent
    pre_decimal, post_decimal = divmod(significand, 1)
    digits = sigfigs - len(str(int(pre_decimal)))
    significand = round(significand, digits)
    # BUGFIX: rounding can push the significand into the next power-of-1000
    # band (e.g. 999999.0 -> 999.999 -> rounds to 1000.0); renormalise so
    # we emit "1.0 M" rather than the nonsensical "1000.0 k".
    if abs(significand) >= 1000:
        significand /= 1000
        exponent += 3
    result = str(significand)
    if exponent:
        if mode == 'exponential':
            superscript = ''.join(sups.get(char, char) for char in str(exponent))
            result += thinspace + times + thinspace + '10' + superscript
        elif mode == 'eng':
            try:
                # If our number has an SI prefix then use it
                prefix = prefixes[exponent]
                result += hairspace + prefix
            except KeyError:
                # Otherwise display in scientific notation
                superscript = ''.join(sups.get(char, char) for char in str(exponent))
                result += thinspace + times + thinspace + '10' + superscript
    return result
def get_screen_geometry():
    """Return a list with one (left, top, width, height) tuple per screen."""
    desktop = qapplication.desktop()
    rects = (desktop.screenGeometry(i) for i in range(desktop.screenCount()))
    return [(r.left(), r.top(), r.width(), r.height()) for r in rects]
class WebServer(ZMQServer):
    """ZMQ request server for lyse.

    Recognised requests: 'hello' (liveness check), 'get dataframe' (returns
    the shots dataframe), and {'filepath': <h5 path>} (queues a shot file
    for analysis). Anything else gets an error string back.
    """
    def handler(self, request_data):
        logger.info('WebServer request: %s' % str(request_data))
        if request_data == 'hello':
            return 'hello'
        elif request_data == 'get dataframe':
            # convert_objects() picks fixed datatypes for columns that are
            # compatible with fixed datatypes, dramatically speeding up
            # pickling. But we don't impose fixed datatypes earlier than now
            # because the user is free to use mixed datatypes in a column, and
            # we won't want to prevent values of a different type being added
            # in the future. All kwargs False because we don't want to coerce
            # strings to numbers or anything - just choose the correct
            # datatype for columns that are already a single datatype:
            return app.filebox.shots_model.dataframe.convert_objects(
                convert_dates=False, convert_numeric=False, convert_timedeltas=False)
        elif isinstance(request_data, dict):
            if 'filepath' in request_data:
                # Map network paths to this machine, normalise to str, then
                # hand the shot file to the filebox for processing.
                h5_filepath = shared_drive.path_to_local(request_data['filepath'])
                if isinstance(h5_filepath, bytes):
                    h5_filepath = h5_filepath.decode('utf8')
                if not isinstance(h5_filepath, str):
                    raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
                app.filebox.incoming_queue.put(h5_filepath)
                return 'added successfully'
        return ("error: operation not supported. Recognised requests are:\n "
                "'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
    """Main window subclass emitting extra signals lyse needs."""
    # A signal to show that the window is shown and painted.
    firstPaint = Signal()
    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)
    def __init__(self, *args, **kwargs):
        QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
        # Tracks whether firstPaint has already been emitted.
        self._previously_painted = False
    def closeEvent(self, event):
        # Let the application decide whether closing is allowed.
        if app.on_close_event():
            return QtWidgets.QMainWindow.closeEvent(self, event)
        else:
            event.ignore()
    def event(self, event):
        result = QtWidgets.QMainWindow.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            # The window system allocated a new native window id for us.
            self.newWindow.emit(self.effectiveWinId())
        return result
    def paintEvent(self, event):
        result = QtWidgets.QMainWindow.paintEvent(self, event)
        if not self._previously_painted:
            # Emit firstPaint exactly once, on the first paint after show().
            self._previously_painted = True
            self.firstPaint.emit()
        return result
class AnalysisRoutine(object):
    """A single analysis routine: a user script executed in its own worker
    subprocess, with a corresponding row (checkbox, status icon, name) in a
    RoutineBox's model."""

    def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
        self.filepath = filepath
        self.shortname = os.path.basename(self.filepath)
        self.model = model
        self.output_box_port = output_box_port
        # Cache the RoutineBox column numbers and data role used for our row:
        self.COL_ACTIVE = RoutineBox.COL_ACTIVE
        self.COL_STATUS = RoutineBox.COL_STATUS
        self.COL_NAME = RoutineBox.COL_NAME
        self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH

        # Outcome of the most recent run of this routine:
        self.error = False
        self.done = False

        self.to_worker, self.from_worker, self.worker = self.start_worker()

        # Make a row to put into the model:
        active_item = QtGui.QStandardItem()
        active_item.setCheckable(True)
        active_item.setCheckState(checked)
        info_item = QtGui.QStandardItem()
        name_item = QtGui.QStandardItem(self.shortname)
        name_item.setToolTip(self.filepath)
        name_item.setData(self.filepath, self.ROLE_FULLPATH)
        self.model.appendRow([active_item, info_item, name_item])

        # Set True while the worker process is being shut down, so other
        # code can tell a deliberate exit from a crash:
        self.exiting = False

    def start_worker(self):
        """Launch the analysis subprocess and hand it our script's filepath.

        Returns (to_worker, from_worker, worker): the two message queues and
        the subprocess handle."""
        # Start a worker process for this analysis routine:
        worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
        child_handles = zprocess.subprocess_with_queues(worker_path, self.output_box_port)
        to_worker, from_worker, worker = child_handles
        # Tell the worker what script it will be executing:
        to_worker.put(self.filepath)
        return to_worker, from_worker, worker

    def do_analysis(self, filepath):
        """Ask the worker to analyse *filepath* and block until it replies.

        Returns (success, data) where data is whatever payload the worker
        sent back. Raises ValueError on an unrecognised reply."""
        self.to_worker.put(['analyse', filepath])
        signal, data = self.from_worker.get()
        if signal == 'error':
            return False, data
        elif signal == 'done':
            return True, data
        else:
            raise ValueError('invalid signal %s'%str(signal))

    @inmain_decorator()
    def set_status(self, status):
        """Update this routine's status icon and done/error flags.

        *status* is one of 'done', 'working', 'error' or 'clear'. Runs in
        the main thread (inmain_decorator) since it touches the Qt model."""
        index = self.get_row_index()
        if index is None:
            # Welp, we've just been deleted. Nothing to do here.
            return
        status_item = self.model.item(index, self.COL_STATUS)
        if status == 'done':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
            self.done = True
            self.error = False
        elif status == 'working':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
            self.done = False
            self.error = False
        elif status == 'error':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
            self.error = True
            self.done = False
        elif status == 'clear':
            status_item.setData(None, QtCore.Qt.DecorationRole)
            self.done = False
            self.error = False
        else:
            raise ValueError(status)

    @inmain_decorator()
    def enabled(self):
        """Return whether this routine's checkbox is checked (main thread)."""
        index = self.get_row_index()
        if index is None:
            # Welp, we've just been deleted.
            return False
        enabled_item = self.model.item(index, self.COL_ACTIVE)
        return (enabled_item.checkState() == QtCore.Qt.Checked)

    def get_row_index(self):
        """Returns the row index for this routine's row in the model.

        Returns None (implicitly) if the row has been removed."""
        for row in range(self.model.rowCount()):
            name_item = self.model.item(row, self.COL_NAME)
            fullpath = name_item.data(self.ROLE_FULLPATH)
            if fullpath == self.filepath:
                return row

    def restart(self):
        """Shut down the worker process and start a fresh one."""
        # TODO set status to 'restarting' or an icon or something, and gray out the item?
        self.end_child(restart=True)

    def remove(self):
        """End the child process and remove from the treeview"""
        self.end_child()
        index = self.get_row_index()
        if index is None:
            # Already gone
            return
        self.model.removeRow(index)

    def end_child(self, restart=False):
        """Politely ask the worker to quit, then poll (via QTimer) for it to
        exit, escalating to terminate/kill if it doesn't. If *restart* is
        True, a new worker is started once the old one is gone."""
        self.to_worker.put(['quit',None])
        # Give the worker two seconds to exit cleanly before escalating:
        timeout_time = time.time() + 2
        self.exiting = True
        QtCore.QTimer.singleShot(50,
            lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))

    def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
        """Poll the worker for exit, re-scheduling ourselves every 50ms.

        Escalation: wait until *timeout_time*, then terminate() and wait two
        more seconds, then kill(). Reports each outcome to the output box."""
        worker.poll()
        if worker.returncode is None and time.time() < timeout_time:
            # Still running and still within the grace period; check again soon:
            QtCore.QTimer.singleShot(50,
                lambda: self.check_child_exited(worker, timeout_time, kill, restart))
            return
        elif worker.returncode is None:
            if not kill:
                worker.terminate()
                app.output_box.output('%s worker not responding.\n'%self.shortname)
                # Grant another two seconds before resorting to kill():
                timeout_time = time.time() + 2
                QtCore.QTimer.singleShot(50,
                    lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
                return
            else:
                worker.kill()
                app.output_box.output('%s worker killed\n'%self.shortname, red=True)
        elif kill:
            app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
        else:
            app.output_box.output('%s worker exited cleanly\n'%self.shortname)

        # if analysis was running notify analysisloop that analysis has failed
        self.from_worker.put(('error', {}))

        if restart:
            self.to_worker, self.from_worker, self.worker = self.start_worker()
            app.output_box.output('%s worker restarted\n'%self.shortname)
        self.exiting = False
class TreeView(QtWidgets.QTreeView):
    """A QTreeView that emits a custom signal leftClicked(index) after a left
    click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click."""
    # NOTE: the docstring above was previously placed after the Signal
    # definitions, where it was a discarded expression statement rather than
    # the class docstring; it has been moved to the first statement so that
    # it actually becomes TreeView.__doc__.
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)

    def __init__(self, *args):
        QtWidgets.QTreeView.__init__(self, *args)
        # The index that was under the cursor on the last left-button press:
        self._pressed_index = None
        # Whether the in-progress click is the second click of a double click:
        self._double_click = False

    def mousePressEvent(self, event):
        result = QtWidgets.QTreeView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result

    def leaveEvent(self, event):
        result = QtWidgets.QTreeView.leaveEvent(self, event)
        # The mouse left the widget mid-click; cancel any pending click state:
        self._pressed_index = None
        self._double_click = False
        return result

    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result

    def mouseReleaseEvent(self, event):
        result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        # Only emit if press and release happened on the same valid index:
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class RoutineBox(object):
    """One group box of analysis routines (singleshot or multishot).

    Owns the Qt model/view listing the routines, the toolbar/context-menu
    actions for adding, removing, reordering and restarting them, and a
    daemon thread (analysis_loop) that runs the enabled routines in order on
    each shot file received from the FileBox.

    The only behavioral change in this revision is fixing the tooltip typo
    'analyis' -> 'analysis'."""
    COL_ACTIVE = 0
    COL_STATUS = 1
    COL_NAME = 2
    ROLE_FULLPATH = QtCore.Qt.UserRole + 1
    # This data (stored in the name item) does not necessarily match
    # the position in the model. It will be set just
    # prior to sort() being called with this role as the sort data.
    # This is how we will reorder the model's rows instead of
    # using remove/insert.
    ROLE_SORTINDEX = QtCore.Qt.UserRole + 2

    def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
        self.multishot = multishot
        self.filebox = filebox
        self.exp_config = exp_config
        # Queues for receiving filepaths to analyse and reporting progress back:
        self.from_filebox = from_filebox
        self.to_filebox = to_filebox
        self.output_box_port = output_box_port

        self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))

        loader = UiLoader()
        loader.registerCustomWidget(TreeView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
        container.addWidget(self.ui)

        if multishot:
            self.ui.groupBox.setTitle('Multishot routines')
        else:
            self.ui.groupBox.setTitle('Singleshot routines')

        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.ui.treeView.setHeader(self.header)
        self.ui.treeView.setModel(self.model)

        active_item = QtGui.QStandardItem()
        active_item.setToolTip('Whether the analysis routine should run')
        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('The status of this analysis routine\'s execution')
        name_item = QtGui.QStandardItem('name')
        name_item.setToolTip('The name of the python script for the analysis routine')

        # A checkbox in the header toggles all routines at once:
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setToolTip('whether the analysis routine should run')
        self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
        self.header.setStretchLastSection(True)
        self.select_all_checkbox.setTristate(False)

        self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
        self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
        self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
        self.model.setSortRole(self.ROLE_SORTINDEX)

        self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
        self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
        self.ui.treeView.setColumnWidth(self.COL_NAME, 200)

        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_active = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
        self.action_set_selected_inactive = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
        self.action_restart_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)

        # Start the file-open dialog in the user's analysislib folder:
        self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')

        self.routines = []

        self.connect_signals()

        # Background daemon thread that runs routines as files arrive:
        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()

    def connect_signals(self):
        """Wire up all UI signals, keeping DisconnectContextManagers so that
        model/checkbox handlers can be temporarily disconnected while we
        update them programmatically."""
        self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
        self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
        self.model.itemChanged.connect(self.on_model_item_changed)
        self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_active.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_inactive.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
        self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)
        self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
        self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
        self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
        self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)

    def on_add_routines_clicked(self):
        """Show a file dialog and add the chosen scripts as routines."""
        routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                               'Select analysis routines',
                                                               self.last_opened_routine_folder,
                                                               "Python scripts (*.py)")
        if type(routine_files) is tuple:
            # PyQt5/PySide2 return (files, selected_filter); unpack it:
            routine_files, _ = routine_files
        if not routine_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_routine_folder = os.path.dirname(routine_files[0])
        self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])

    def add_routines(self, routine_files, clear_existing=False):
        """Add routines to the routine box, where routine_files is a list of
        tuples containing the filepath and whether the routine is enabled or
        not when it is added. if clear_existing == True, then any existing
        analysis routines will be cleared before the new ones are added."""
        if clear_existing:
            for routine in self.routines[:]:
                routine.remove()
                self.routines.remove(routine)

        # Queue the files to be opened:
        for filepath, checked in routine_files:
            if filepath in [routine.filepath for routine in self.routines]:
                app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
                continue
            routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
            self.routines.append(routine)
        self.update_select_all_checkstate()

    def on_treeview_double_left_clicked(self, index):
        """Open the double-clicked routine in the configured text editor."""
        # If double clicking on the the name item, open
        # the routine in the specified text editor:
        if index.column() != self.COL_NAME:
            return
        name_item = self.model.item(index.row(), self.COL_NAME)
        routine_filepath = name_item.data(self.ROLE_FULLPATH)
        # get path to text editor
        editor_path = self.exp_config.get('programs', 'text_editor')
        editor_args = self.exp_config.get('programs', 'text_editor_arguments')
        # Get the current labscript file:
        if not editor_path:
            error_dialog("No editor specified in the labconfig.")
        if '{file}' in editor_args:
            # Split the args on spaces into a list, replacing {file} with the labscript file
            editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            editor_args = [routine_filepath] + editor_args.split()
        try:
            subprocess.Popen([editor_path] + editor_args)
        except Exception as e:
            error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))

    def on_remove_selection(self):
        self.remove_selection()

    def remove_selection(self, confirm=True):
        """Remove the currently selected routines, optionally after a
        confirmation dialog."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        if not selected_rows:
            return
        if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
            return
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines[:]:
            if routine.filepath in filepaths:
                routine.remove()
                self.routines.remove(routine)
        self.update_select_all_checkstate()

    def on_model_item_changed(self, item):
        if item.column() == self.COL_ACTIVE:
            self.update_select_all_checkstate()

    def on_select_all_state_changed(self, state):
        """Propagate the header checkbox's state to every routine's checkbox."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            with self.model_item_changed_disconnected:
                for row in range(self.model.rowCount()):
                    active_item = self.model.item(row, self.COL_ACTIVE)
                    active_item.setCheckState(state)

    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui.treeView)
        menu.addAction(self.action_set_selected_active)
        menu.addAction(self.action_set_selected_inactive)
        menu.addAction(self.action_restart_selected)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())

    def on_set_selected_triggered(self, active):
        """Set the checkstate of all selected routines to *active*."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            active_item = self.model.item(row, self.COL_ACTIVE)
            active_item.setCheckState(active)
        self.update_select_all_checkstate()

    def on_move_to_top_clicked(self):
        """Move selected routines to the top, preserving relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        # Selected rows are assigned positions 0..k-1; the rest follow:
        i_selected = 0
        i_unselected = len(selected_rows)
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)

    def on_move_up_clicked(self):
        """Move each selected routine up one place, unless blocked."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        for i in range(n):
            if i in selected_rows:
                if last_unselected_index is None:
                    # Already at the top of a selected run; stays put:
                    order.append(i)
                else:
                    # Swap with the unselected row above:
                    order.append(i - 1)
                    order[last_unselected_index] += 1
            else:
                last_unselected_index = i
                order.append(i)
        self.reorder(order)

    def on_move_down_clicked(self):
        """Move each selected routine down one place, unless blocked."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        # Iterate bottom-up, mirroring on_move_up_clicked:
        for i in reversed(range(n)):
            if i in selected_rows:
                if last_unselected_index is None:
                    order.insert(0, i)
                else:
                    order.insert(0, i + 1)
                    order[last_unselected_index - n] -= 1
            else:
                last_unselected_index = i
                order.insert(0, i)
        self.reorder(order)

    def on_move_to_bottom_clicked(self):
        """Move selected routines to the bottom, preserving relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        i_selected = n - len(selected_rows)
        i_unselected = 0
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)

    def on_restart_selected_triggered(self):
        """Restart the worker process of every selected routine."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines:
            if routine.filepath in filepaths:
                routine.restart()
        self.update_select_all_checkstate()

    def analysis_loop(self):
        """Daemon-thread loop: block on the from_filebox queue and analyse
        each received filepath (None for multishot boxes)."""
        while True:
            filepath = self.from_filebox.get()
            if self.multishot:
                assert filepath is None
                # TODO: get the filepath of the output h5 file:
                # filepath = self.filechooserentry.get_text()
            self.logger.info('got a file to process: %s'%filepath)
            self.do_analysis(filepath)

    def todo(self):
        """How many analysis routines are not done?"""
        return len([r for r in self.routines if r.enabled() and not r.done])

    def do_analysis(self, filepath):
        """Run all analysis routines once on the given filepath,
        which is a shot file if we are a singleshot routine box"""
        for routine in self.routines:
            routine.set_status('clear')
        remaining = self.todo()
        error = False
        updated_data = {}
        while remaining:
            self.logger.debug('%d routines left to do'%remaining)
            # Pick the first enabled, not-yet-done routine (for/else: None
            # if there is no such routine):
            for routine in self.routines:
                if routine.enabled() and not routine.done:
                    break
            else:
                routine = None
            if routine is not None:
                self.logger.info('running analysis routine %s'%routine.shortname)
                routine.set_status('working')
                success, updated_data = routine.do_analysis(filepath)
                if success:
                    routine.set_status('done')
                    self.logger.debug('success')
                else:
                    routine.set_status('error')
                    self.logger.debug('failure')
                    error = True
                    break
            # Race conditions here, but it's only for reporting percent done
            # so it doesn't matter if it's wrong briefly:
            remaining = self.todo()
            total = len([r for r in self.routines if r.enabled()])
            done = total - remaining
            try:
                status_percent = 100*float(done)/(remaining + done)
            except ZeroDivisionError:
                # All routines got deleted mid-analysis, we're done here:
                status_percent = 100.0
            self.to_filebox.put(['progress', status_percent, updated_data])
        if error:
            self.to_filebox.put(['error', None, updated_data])
        else:
            self.to_filebox.put(['done', 100.0, {}])
        self.logger.debug('completed analysis of %s'%filepath)

    def reorder(self, order):
        """Reorder the routines so routine i moves to position order[i]."""
        assert len(order) == len(set(order)), 'ordering contains non-unique elements'
        # Apply the reordering to the liststore:
        for old_index, new_index in enumerate(order):
            name_item = self.model.item(old_index, self.COL_NAME)
            name_item.setData(new_index, self.ROLE_SORTINDEX)
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
        # Apply new order to our list of routines too:
        self.routines = [self.routines[order.index(i)] for i in range(len(order))]

    def update_select_all_checkstate(self):
        """Set the header checkbox to checked/unchecked/partial according to
        the routines' individual checkstates."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                active_item = self.model.item(row, self.COL_ACTIVE)
                all_states.append(active_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
    """Dialog window for editing visible columns. It never closes itself:
    attempts to close are reported via close_signal so the owner can decide
    what to do."""
    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)
    close_signal = Signal()

    def __init__(self):
        QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)

    def event(self, event):
        outcome = QtWidgets.QDialog.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            self.newWindow.emit(self.effectiveWinId())
        return outcome

    def closeEvent(self, event):
        # Veto the close and let the owner respond to close_signal instead:
        self.close_signal.emit()
        event.ignore()
class EditColumns(object):
    """Controller for the 'edit columns' dialog: lets the user show/hide
    FileBox columns, with filtering, select-all, and cancel/apply semantics.

    columns_visible maps column index -> bool; column_names maps column
    index -> name (tuples for real data columns)."""
    ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
    COL_VISIBLE = 0
    COL_NAME = 1

    def __init__(self, filebox, column_names, columns_visible):
        self.filebox = filebox
        # Work on copies so edits can be cancelled without touching the
        # filebox's own dicts:
        self.column_names = column_names.copy()
        self.columns_visible = columns_visible.copy()
        # Snapshot used to restore state if the user cancels:
        self.old_columns_visible = columns_visible.copy()

        loader = UiLoader()
        self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())

        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setTristate(False)
        self.ui.treeView.setHeader(self.header)
        # Proxy model provides case-insensitive filtering on the name column:
        self.proxy_model = QtCore.QSortFilterProxyModel()
        self.proxy_model.setSourceModel(self.model)
        self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.proxy_model.setFilterKeyColumn(self.COL_NAME)
        self.ui.treeView.setSortingEnabled(True)
        self.header.setStretchLastSection(True)
        self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
        self.ui.treeView.setModel(self.proxy_model)
        self.ui.setWindowModality(QtCore.Qt.ApplicationModal)

        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_visible = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
        self.action_set_selected_hidden = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)

        self.connect_signals()
        self.populate_model(column_names, self.columns_visible)

    def connect_signals(self):
        """Wire up all UI signals; keep DisconnectContextManagers so handlers
        can be suppressed during programmatic updates."""
        if os.name == 'nt':
            self.ui.newWindow.connect(set_win_appusermodel)
        self.ui.close_signal.connect(self.close)
        self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
        self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
        self.ui.pushButton_cancel.clicked.connect(self.cancel)
        self.model.itemChanged.connect(self.on_model_item_changed)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_visible.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_hidden.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))

    def populate_model(self, column_names, columns_visible):
        """Rebuild the model from scratch with one row per data column,
        sorted by name, each row carrying a visibility checkbox."""
        self.model.clear()
        self.model.setHorizontalHeaderLabels(['', 'Name'])
        self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
        self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
        # Which indices in self.columns_visible the row numbers correspond to
        self.column_indices = {}
        # Remove our special columns from the dict of column names by keeping only tuples:
        column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
        # Sort the column names as comma separated values, converting to lower case:
        sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
        for column_index, name in sorted(column_names.items(), key=sortkey):
            visible = columns_visible[column_index]
            visible_item = QtGui.QStandardItem()
            visible_item.setCheckable(True)
            if visible:
                visible_item.setCheckState(QtCore.Qt.Checked)
                visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
            else:
                visible_item.setCheckState(QtCore.Qt.Unchecked)
                visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
            name_as_string = ', '.join(name).strip(', ')
            name_item = QtGui.QStandardItem(name_as_string)
            name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
            self.model.appendRow([visible_item, name_item])
            self.column_indices[self.model.rowCount() - 1] = column_index
        self.ui.treeView.resizeColumnToContents(self.COL_NAME)
        self.update_select_all_checkstate()
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)

    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui)
        menu.addAction(self.action_set_selected_visible)
        menu.addAction(self.action_set_selected_hidden)
        menu.exec_(QtGui.QCursor.pos())

    def on_set_selected_triggered(self, visible):
        """Set the visibility checkstate of all selected rows and apply."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        # Selection is in proxy coordinates; map back to source rows:
        selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
        for row in selected_rows:
            visible_item = self.model.item(row, self.COL_VISIBLE)
            self.update_visible_state(visible_item, visible)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)

    def on_filter_text_edited(self, text):
        self.proxy_model.setFilterWildcard(text)

    def on_select_all_state_changed(self, state):
        """Apply the header checkbox's state to every row and apply."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                self.update_visible_state(visible_item, state)
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)

    def update_visible_state(self, item, state):
        """Set one row's checkstate and mirror it into columns_visible,
        without triggering on_model_item_changed."""
        assert item.column() == self.COL_VISIBLE, "unexpected column"
        row = item.row()
        with self.model_item_changed_disconnected:
            item.setCheckState(state)
            item.setData(state, self.ROLE_SORT_DATA)
            if state == QtCore.Qt.Checked:
                self.columns_visible[self.column_indices[row]] = True
            else:
                self.columns_visible[self.column_indices[row]] = False

    def update_select_all_checkstate(self):
        """Set the header checkbox to checked/unchecked/partial according to
        the rows' individual checkstates."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                all_states.append(visible_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)

    def on_model_item_changed(self, item):
        state = item.checkState()
        self.update_visible_state(item, state)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)

    def do_sort(self):
        """Re-apply the view's current sort column and order."""
        header = self.ui.treeView.header()
        sort_column = header.sortIndicatorSection()
        sort_order = header.sortIndicatorOrder()
        self.ui.treeView.sortByColumn(sort_column, sort_order)

    def update_columns(self, column_names, columns_visible):
        """Replace our column data with new mappings from the filebox,
        preserving the pre-edit visibility snapshot by *name* (column
        indices may have been reassigned)."""
        # Index/name mapping may have changed. Get a mapping by *name* of
        # which columns were previously visible, so we can update our by-index
        # mapping in a moment:
        old_columns_visible_by_name = {}
        for old_column_number, visible in self.old_columns_visible.items():
            column_name = self.column_names[old_column_number]
            old_columns_visible_by_name[column_name] = visible
        self.columns_visible = columns_visible.copy()
        self.column_names = column_names.copy()
        # Update the by-index mapping of which columns were visible before editing:
        self.old_columns_visible = {}
        for index, name in self.column_names.items():
            try:
                self.old_columns_visible[index] = old_columns_visible_by_name[name]
            except KeyError:
                # A new column. If editing is cancelled, any new columns
                # should be set to visible:
                self.old_columns_visible[index] = True
        self.populate_model(column_names, self.columns_visible)

    def show(self):
        """Show the dialog, snapshotting current visibility for cancel."""
        self.old_columns_visible = self.columns_visible.copy()
        self.ui.show()

    def close(self):
        """Revert to the pre-edit snapshot and hide the dialog."""
        self.columns_visible = self.old_columns_visible.copy()
        self.filebox.set_columns_visible(self.columns_visible)
        self.populate_model(self.column_names, self.columns_visible)
        self.ui.hide()

    def cancel(self):
        # Triggers the dialog's close_signal, which calls self.close():
        self.ui.close()

    def make_it_so(self):
        # Accept: edits were already applied live, so just hide the dialog:
        self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
    """An item delegate with a fixed height and a progress bar in one column"""
    # Extra vertical padding, in pixels, added to the font height per row:
    EXTRA_ROW_HEIGHT = 2

    def __init__(self, view, model, col_status, role_status_percent):
        # The view supplies the font for row sizing; the model supplies the
        # per-row progress percentage via the given data role:
        self.view = view
        self.model = model
        self.COL_STATUS = col_status
        self.ROLE_STATUS_PERCENT = role_status_percent
        QtWidgets.QStyledItemDelegate.__init__(self)

    def sizeHint(self, *args):
        """Return the default size hint, but with a fixed row height derived
        from the view's font plus EXTRA_ROW_HEIGHT."""
        fontmetrics = QtGui.QFontMetrics(self.view.font())
        text_height = fontmetrics.height()
        row_height = text_height + self.EXTRA_ROW_HEIGHT
        size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
        return QtCore.QSize(size.width(), row_height)

    def paint(self, painter, option, index):
        """Paint the status column as a progress bar while the value is below
        100; otherwise (and for all other columns) paint normally."""
        if index.column() == self.COL_STATUS:
            status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
            if status_percent == 100:
                # Render as a normal item - this shows whatever icon is set instead of a progress bar.
                return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
            else:
                # Method of rendering a progress bar into the view copied from
                # Qt's 'network-torrent' example:
                # http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html

                # Set up a QStyleOptionProgressBar to precisely mimic the
                # environment of a progress bar.
                progress_bar_option = QtWidgets.QStyleOptionProgressBar()
                progress_bar_option.state = QtWidgets.QStyle.State_Enabled
                progress_bar_option.direction = qapplication.layoutDirection()
                progress_bar_option.rect = option.rect
                progress_bar_option.fontMetrics = qapplication.fontMetrics()
                progress_bar_option.minimum = 0
                progress_bar_option.maximum = 100
                progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
                progress_bar_option.textVisible = True

                # Set the progress and text values of the style option.
                progress_bar_option.progress = status_percent
                progress_bar_option.text = '%d%%' % status_percent

                # Draw the progress bar onto the view.
                qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, progress_bar_option, painter)
        else:
            return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
    """A QStandardItemModel whose items can never be edited in the view."""
    def flags(self, index):
        """Return flags as normal except that the ItemIsEditable
        flag is always False"""
        default_flags = QtGui.QStandardItemModel.flags(self, index)
        editable_mask = QtCore.Qt.ItemIsEditable
        return default_flags & ~editable_mask
class TableView(QtWidgets.QTableView):
    """A QTableView that emits a custom signal leftClicked(index) after a left
    click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click. Multiple inheritance of QObjects is not possible, so we
    are forced to duplicate code instead of sharing code with the extremely
    similar TreeView class in this module"""
    # NOTE: the docstring above was previously placed after the Signal
    # definitions, where it was a discarded expression statement rather than
    # the class docstring; it has been moved to the first statement so that
    # it actually becomes TableView.__doc__.
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)

    def __init__(self, *args):
        QtWidgets.QTableView.__init__(self, *args)
        # The index that was under the cursor on the last left-button press:
        self._pressed_index = None
        # Whether the in-progress click is the second click of a double click:
        self._double_click = False

    def mousePressEvent(self, event):
        result = QtWidgets.QTableView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result

    def leaveEvent(self, event):
        result = QtWidgets.QTableView.leaveEvent(self, event)
        # The mouse left the widget mid-click; cancel any pending click state:
        self._pressed_index = None
        self._double_click = False
        return result

    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result

    def mouseReleaseEvent(self, event):
        result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        # Only emit if press and release happened on the same valid index:
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class DataFrameModel(QtCore.QObject):
    """Keeps a pandas DataFrame of per-shot analysis results in sync with a
    Qt item model displayed in the filebox's TableView.

    Rows correspond to shot (HDF5) files, columns to analysis results. Row
    numbers in the Qt model and the dataframe index are kept identical so
    the two can be cross-referenced directly. Methods decorated with
    @inmain_decorator() may be called from worker threads; they run in the
    main (GUI) thread."""

    # Fixed columns at the left of the table:
    COL_STATUS = 0
    COL_FILEPATH = 1
    # Custom item-data roles on the status item:
    ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
    ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2

    # Emitted whenever columns are added or removed:
    columns_changed = Signal()

    def __init__(self, view, exp_config):
        QtCore.QObject.__init__(self)
        self._view = view
        self.exp_config = exp_config
        self._model = UneditableModel()
        # Maps shot filepath -> current row number, maintained by renumber_rows():
        self.row_number_by_filepath = {}
        # Number of digits in the row labels last time we renumbered:
        self._previous_n_digits = 0

        headerview_style = """
        QHeaderView {
          font-size: 8pt;
          color: black;
        }
        QHeaderView::section{
          font-size: 8pt;
          color: black;
        }
        """

        self._header = HorizontalHeaderViewWithWidgets(self._model)
        self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
        self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
        self._vertheader.setStyleSheet(headerview_style)
        self._header.setStyleSheet(headerview_style)
        self._vertheader.setHighlightSections(True)
        self._vertheader.setSectionsClickable(True)
        self._view.setModel(self._model)
        self._view.setHorizontalHeader(self._header)
        self._view.setVerticalHeader(self._vertheader)
        # Delegate draws a progress bar in the status column:
        self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
        self._view.setItemDelegate(self._delegate)
        self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Check if integer indexing is to be used
        try:
            self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
        except (LabConfig.NoOptionError, LabConfig.NoSectionError):
            self.integer_indexing = False

        # This dataframe will contain all the scalar data
        # from the shot files that are currently open:
        index = pandas.MultiIndex.from_tuples([('filepath', '')])
        self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
        # How many levels the dataframe's multiindex has:
        self.nlevels = self.dataframe.columns.nlevels

        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('status/progress of single-shot analysis')
        self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)

        filepath_item = QtGui.QStandardItem('filepath')
        filepath_item.setToolTip('filepath')
        self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)

        self._view.setColumnWidth(self.COL_STATUS, 70)
        self._view.setColumnWidth(self.COL_FILEPATH, 100)

        # Column indices to names and vice versa for fast lookup:
        self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
        self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
        self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}

        # Whether or not a deleted column was visible at the time it was deleted (by name):
        self.deleted_columns_visible = {}

        # Make the actions for the context menu:
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)

        self.connect_signals()

    def connect_signals(self):
        """Hook up the context menu and its 'remove selected shots' action."""
        self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)

    def on_remove_selection(self):
        self.remove_selection()

    def remove_selection(self, confirm=True):
        """Remove the currently selected shots from both the dataframe and
        the Qt model, asking the user for confirmation first if confirm is
        True."""
        selection_model = self._view.selectionModel()
        selected_indexes = selection_model.selectedRows()
        selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
        if not selected_name_items:
            return
        if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
            return
        # Remove from DataFrame first:
        self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
        self.dataframe.index = pandas.Index(range(len(self.dataframe)))
        # Delete one at a time from Qt model:
        for name_item in selected_name_items:
            row = name_item.row()
            self._model.removeRow(row)
        self.renumber_rows()

    def mark_selection_not_done(self):
        """Reset the analysis progress of the selected shots to zero so the
        analysis loop will pick them up again."""
        selected_indexes = self._view.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_DELETED_OFF_DISK):
                # If the shot was previously not readable on disk, check to
                # see if it's readable now. It may have been undeleted or
                # perhaps it being unreadable before was due to a network
                # glitch or similar.
                filepath = self._model.item(row, self.COL_FILEPATH).text()
                if not os.path.exists(filepath):
                    continue
                # Shot file is accessible again:
                status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
                status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
                status_item.setToolTip(None)
            status_item.setData(0, self.ROLE_STATUS_PERCENT)

    def on_view_context_menu_requested(self, point):
        # 'point' is unused: the menu is shown at the current cursor position.
        menu = QtWidgets.QMenu(self._view)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())

    def on_double_click(self, index):
        """Open the double-clicked shot file in the HDF5 viewer configured in
        the labconfig."""
        filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
        shot_filepath = filepath_item.text()

        # get path to text editor
        viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
        viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
        # Get the current labscript file:
        if not viewer_path:
            error_dialog("No hdf5 viewer specified in the labconfig.")
            # Bugfix: previously fell through and attempted to launch an
            # empty path anyway, producing a second, confusing error dialog.
            return
        if '{file}' in viewer_args:
            # Split the args on spaces into a list, replacing {file} with the labscript file
            viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            viewer_args = [shot_filepath] + viewer_args.split()
        try:
            subprocess.Popen([viewer_path] + viewer_args)
        except Exception as e:
            error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))

    def set_columns_visible(self, columns_visible):
        """Apply a {column_index: bool} visibility mapping to the view."""
        self.columns_visible = columns_visible
        for column_index, visible in columns_visible.items():
            self._view.setColumnHidden(column_index, not visible)

    def update_column_levels(self):
        """Pads the keys and values of our lists of column names so that
        they still match those in the dataframe after the number of
        levels in its multiindex has increased"""
        extra_levels = self.dataframe.columns.nlevels - self.nlevels
        if extra_levels > 0:
            self.nlevels = self.dataframe.columns.nlevels
            column_indices = {}
            column_names = {}
            for column_name in self.column_indices:
                if not isinstance(column_name, tuple):
                    # It's one of our special columns
                    new_column_name = column_name
                else:
                    new_column_name = column_name + ('',) * extra_levels
                column_index = self.column_indices[column_name]
                column_indices[new_column_name] = column_index
                column_names[column_index] = new_column_name
            self.column_indices = column_indices
            self.column_names = column_names

    @inmain_decorator()
    def mark_as_deleted_off_disk(self, filepath):
        """Flag a shot whose file has vanished or become unreadable, so that
        it is not repeatedly re-analysed."""
        # Confirm the shot hasn't been removed from lyse (we are in the main
        # thread so there is no race condition in checking first)
        if not filepath in self.dataframe['filepath'].values:
            # Shot has been removed from FileBox, nothing to do here:
            return
        row_number = self.row_number_by_filepath[filepath]
        status_item = self._model.item(row_number, self.COL_STATUS)
        already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
        if already_marked_as_deleted:
            return
        # Icon only displays if percent completion is 100. This is also
        # important so that the shot is not picked up as analysis
        # incomplete and analysis re-attempted on it.
        status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
        status_item.setData(100, self.ROLE_STATUS_PERCENT)
        status_item.setToolTip("Shot has been deleted off disk or is unreadable")
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
        app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)

    @inmain_decorator()
    def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
        """Update a row in the dataframe and Qt model to the data in the HDF5
        file for that shot. Also sets the percent done, if specified.

        Exactly one of new_row_data (a full replacement row) or
        updated_row_data (a {(group, name): value} dict of changed results)
        must be given unless dataframe_already_updated is True."""
        # Validate arguments and look up the row *before* blocking model
        # signals, so early exits cannot leave signals blocked:
        if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
            raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
        try:
            row_number = self.row_number_by_filepath[filepath]
        except KeyError:
            # Row has been deleted, nothing to do here:
            return
        # To speed things up block signals to the model during update.
        # Bugfix: use try/finally so that an exception mid-update can no
        # longer leave the model with signals permanently blocked.
        self._model.blockSignals(True)
        try:
            filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
            assert filepath == self.dataframe.get_value(row_number, filepath_colname)
            # Update the row in the dataframe first:
            if updated_row_data is not None and not dataframe_already_updated:
                for group, name in updated_row_data:
                    column_name = (group, name) + ('',) * (self.nlevels - 2)
                    value = updated_row_data[group, name]
                    try:
                        self.dataframe.set_value(row_number, column_name, value)
                    except ValueError:
                        # did the column not already exist when we tried to set an iterable?
                        if not column_name in self.dataframe.columns:
                            # create it with a non-iterable and then overwrite with the iterable value:
                            self.dataframe.set_value(row_number, column_name, None)
                        else:
                            # Incompatible datatype - convert the datatype of the column to
                            # 'object'
                            self.dataframe[column_name] = self.dataframe[column_name].astype('object')
                        # Now that the column exists and has dtype object, we can set the value:
                        self.dataframe.set_value(row_number, column_name, value)
                dataframe_already_updated = True

            if not dataframe_already_updated:
                if new_row_data is None:
                    raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
                                     "by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
                self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
                self.update_column_levels()

            # Check and create necessary new columns in the Qt model:
            new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
            new_columns_start = self._model.columnCount()
            self._model.insertColumns(new_columns_start, len(new_column_names))
            for i, column_name in enumerate(sorted(new_column_names)):
                # Set the header label of the new column:
                column_number = new_columns_start + i
                self.column_names[column_number] = column_name
                self.column_indices[column_name] = column_number
                if column_name in self.deleted_columns_visible:
                    # Restore the former visibility of this column if we've
                    # seen one with its name before:
                    visible = self.deleted_columns_visible[column_name]
                    self.columns_visible[column_number] = visible
                    self._view.setColumnHidden(column_number, not visible)
                else:
                    # new columns are visible by default:
                    self.columns_visible[column_number] = True
                column_name_as_string = '\n'.join(column_name).strip()
                header_item = QtGui.QStandardItem(column_name_as_string)
                header_item.setToolTip(column_name_as_string)
                self._model.setHorizontalHeaderItem(column_number, header_item)

            if new_column_names:
                # Update the visibility state of new columns, in case some new columns are hidden.
                # Bugfix: this was previously a bare attribute reference
                # ('self.set_columns_visible') that did nothing:
                self.set_columns_visible(self.columns_visible)

            # Check and remove any no-longer-needed columns in the Qt model:
            defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
                                    - {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
            defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
            for column_number in sorted(defunct_column_indices, reverse=True):
                # Remove columns from the Qt model. In reverse order so that
                # removals do not change the position of columns yet to be
                # removed.
                self._model.removeColumn(column_number)
                # Save whether or not the column was visible when it was
                # removed (so that if it is re-added the visibility will be retained):
                self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
                del self.column_names[column_number]
                del self.columns_visible[column_number]

            if defunct_column_indices:
                # Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
                self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
                self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
                # Update the inverse mapping of self.column_names:
                self.column_indices = {name: index for index, name in self.column_names.items()}

            # Update the data in the Qt model:
            dataframe_row = self.dataframe.iloc[row_number].to_dict()
            for column_number, column_name in self.column_names.items():
                if not isinstance(column_name, tuple):
                    # One of our special columns, does not correspond to a column in the dataframe:
                    continue
                if updated_row_data is not None and column_name not in updated_row_data:
                    continue
                value = dataframe_row[column_name]
                if isinstance(value, float):
                    value_str = scientific_notation(value)
                else:
                    value_str = str(value)
                lines = value_str.splitlines()
                if len(lines) > 1:
                    short_value_str = lines[0] + ' ...'
                else:
                    short_value_str = value_str
                item = self._model.item(row_number, column_number)
                if item is None:
                    # This is the first time we've written a value to this part of the model:
                    item = QtGui.QStandardItem(short_value_str)
                    item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
                    self._model.setItem(row_number, column_number, item)
                else:
                    item.setText(short_value_str)
                item.setToolTip(repr(value))

            for i, column_name in enumerate(sorted(new_column_names)):
                # Resize any new columns to fit contents:
                column_number = new_columns_start + i
                self._view.resizeColumnToContents(column_number)

            if status_percent is not None:
                status_item = self._model.item(row_number, self.COL_STATUS)
                status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)

            if new_column_names or defunct_column_names:
                self.columns_changed.emit()
        finally:
            # unblock signals to the model and tell it to update
            self._model.blockSignals(False)
            self._model.layoutChanged.emit()

    def new_row(self, filepath, done=False):
        """Return the [status_item, filepath_item] pair for a new table row.
        'done' marks the shot as already fully analysed (100%)."""
        status_item = QtGui.QStandardItem()
        if done:
            status_item.setData(100, self.ROLE_STATUS_PERCENT)
            # Resource path normalised to the ':qtutils/...' form used
            # everywhere else in this file (was ':/qtutils/fugue/tick'):
            status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
        else:
            status_item.setData(0, self.ROLE_STATUS_PERCENT)
            # NOTE(review): the same tick icon is set here too; it appears
            # the delegate only shows the icon once progress reaches 100% -
            # confirm against ItemDelegate.paint.
            status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
        name_item = QtGui.QStandardItem(filepath)
        return [status_item, name_item]

    def renumber_rows(self, add_from=0):
        """Add/update row indices - the rows are numbered in simple sequential
        order for easy comparison with the dataframe. add_from allows you to
        only add numbers for new rows from the given index as a performance
        optimisation, though if the number of digits changes, all rows will
        still be renumbered. add_from should not be used if rows have been
        deleted."""
        n_digits = len(str(self._model.rowCount()))
        if n_digits != self._previous_n_digits:
            # All labels must be updated:
            add_from = 0
        self._previous_n_digits = n_digits

        if add_from == 0:
            self.row_number_by_filepath = {}

        for row_number in range(add_from, self._model.rowCount()):
            vertical_header_item = self._model.verticalHeaderItem(row_number)
            row_number_str = str(row_number).rjust(n_digits)
            vert_header_text = '{}. |'.format(row_number_str)
            filepath_item = self._model.item(row_number, self.COL_FILEPATH)
            filepath = filepath_item.text()
            self.row_number_by_filepath[filepath] = row_number
            if self.integer_indexing:
                # Label rows by sequence/run/repeat numbers from the dataframe:
                header_cols = ['sequence_index', 'run number', 'run repeat']
                header_strings = []
                for col in header_cols:
                    try:
                        val = self.dataframe[col].values[row_number]
                        header_strings.append(' {:04d}'.format(val))
                    except (KeyError, ValueError):
                        header_strings.append('----')
                vert_header_text += ' |'.join(header_strings)
            else:
                # Label rows by shot file basename:
                basename = os.path.splitext(os.path.basename(filepath))[0]
                vert_header_text += ' ' + basename
            vertical_header_item.setText(vert_header_text)

    @inmain_decorator()
    def add_files(self, filepaths, new_row_data, done=False):
        """Add files to the dataframe model. New_row_data should be a
        dataframe containing the new rows."""
        to_add = []
        # Check for duplicates:
        for filepath in filepaths:
            if filepath in self.row_number_by_filepath or filepath in to_add:
                app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
                if new_row_data is not None:
                    # Drop the duplicate's row from the incoming data too:
                    df_row_index = np.where(new_row_data['filepath'].values == filepath)
                    new_row_data = new_row_data.drop(df_row_index[0])
                    new_row_data.index = pandas.Index(range(len(new_row_data)))
            else:
                to_add.append(filepath)
        assert len(new_row_data) == len(to_add)
        if to_add:
            # Update the dataframe:
            self.dataframe = concat_with_padding(self.dataframe, new_row_data)
            self.update_column_levels()
        app.filebox.set_add_shots_progress(None, None, "updating filebox")
        for filepath in to_add:
            # Add the new rows to the Qt model:
            self._model.appendRow(self.new_row(filepath, done=done))
            vert_header_item = QtGui.QStandardItem('...loading...')
            self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
            self._view.resizeRowToContents(self._model.rowCount() - 1)
        self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
        # Update the Qt model:
        for filepath in to_add:
            self.update_row(filepath, dataframe_already_updated=True)

    @inmain_decorator()
    def get_first_incomplete(self):
        """Returns the filepath of the first shot in the model that has not
        been analysed, or None (implicitly) if all shots are complete."""
        for row in range(self._model.rowCount()):
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
                filepath_item = self._model.item(row, self.COL_FILEPATH)
                return filepath_item.text()
class FileBox(object):
    """GUI panel listing loaded shot files, plus the worker threads that
    receive incoming shots and dispatch them for analysis.

    Two daemon threads are started on construction:
      - incoming_buffer_loop: drains self.incoming_queue (fed by the 'add
        shots' dialog and, per its docstring, a ZMQ server elsewhere in the
        app) and adds the shots to the model in batches.
      - analysis_loop: waits on self.analysis_pending and feeds unanalysed
        shots to the single-shot routinebox via the to/from_singleshot
        queues, then runs multishot analysis when required.
    """

    def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
        # Queues connecting this box to the single-shot and multi-shot
        # routineboxes:
        self.exp_config = exp_config
        self.to_singleshot = to_singleshot
        self.to_multishot = to_multishot
        self.from_singleshot = from_singleshot
        self.from_multishot = from_multishot

        self.logger = logging.getLogger('lyse.FileBox')
        self.logger.info('starting')

        # Load the .ui file and embed it in the given container layout:
        loader = UiLoader()
        loader.registerCustomWidget(TableView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
        self.ui.progressBar_add_shots.hide()
        container.addWidget(self.ui)
        # The model holding the shots dataframe behind the table view:
        self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
        set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
        self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
        self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')

        self.connect_signals()

        # Flags read by analysis_loop (set from the GUI thread):
        self.analysis_paused = False
        self.multishot_required = False
        # An Event to let the analysis thread know to check for shots that
        # need analysing, rather than using a time.sleep:
        self.analysis_pending = threading.Event()
        # The folder that the 'add shots' dialog will open to:
        self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
        # A queue for storing incoming files from the ZMQ server so
        # the server can keep receiving files even if analysis is slow
        # or paused:
        self.incoming_queue = queue.Queue()
        # Start the thread to handle incoming files, and store them in
        # a buffer if processing is paused:
        self.incoming = threading.Thread(target=self.incoming_buffer_loop)
        self.incoming.daemon = True
        self.incoming.start()

        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()

    def connect_signals(self):
        """Connect GUI widget signals to their handlers."""
        self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
        self.shots_model.columns_changed.connect(self.on_columns_changed)
        self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
        self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
        self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
        self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
        self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
        self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)

    def on_edit_columns_clicked(self):
        self.edit_columns_dialog.show()

    def on_columns_changed(self):
        # Keep the edit-columns dialog in sync with the model's columns:
        column_names = self.shots_model.column_names
        columns_visible = self.shots_model.columns_visible
        self.edit_columns_dialog.update_columns(column_names, columns_visible)

    def on_add_shot_files_clicked(self):
        """Show a file dialog and queue the chosen .h5 files for loading."""
        shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                            'Select shot files',
                                                            self.last_opened_shots_folder,
                                                            "HDF5 files (*.h5)")
        if type(shot_files) is tuple:
            # Some Qt bindings return (files, selected_filter):
            shot_files, _ = shot_files
        if not shot_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]

        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_shots_folder = os.path.dirname(shot_files[0])
        # Queue the files to be opened:
        for filepath in shot_files:
            self.incoming_queue.put(filepath)

    def on_analysis_running_toggled(self, pressed):
        """Pause/resume analysis when the toggle button changes state.
        NOTE(review): both branches set the same ':qtutils/fugue/control'
        icon - possibly intended to differ; confirm against the icon set."""
        if pressed:
            self.analysis_paused = True
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis paused')
        else:
            self.analysis_paused = False
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis running')
            # Wake the analysis thread now that we are unpaused:
            self.analysis_pending.set()

    def on_mark_selection_not_done_clicked(self):
        self.shots_model.mark_selection_not_done()
        # Let the analysis loop know to look for these shots:
        self.analysis_pending.set()

    def on_run_multishot_analysis_clicked(self):
        self.multishot_required = True
        self.analysis_pending.set()

    def set_columns_visible(self, columns_visible):
        self.shots_model.set_columns_visible(columns_visible)

    @inmain_decorator()
    def set_add_shots_progress(self, completed, total, message):
        """Update the 'adding shots' progress bar. completed/total may be
        None to leave the respective value unchanged; completed == total
        with message None hides the bar."""
        self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
        if completed == total and message is None:
            self.ui.progressBar_add_shots.hide()
        else:
            if total is not None:
                self.ui.progressBar_add_shots.setMaximum(total)
            if completed is not None:
                self.ui.progressBar_add_shots.setValue(completed)
            if self.ui.progressBar_add_shots.isHidden():
                self.ui.progressBar_add_shots.show()
        if completed is None and total is None and message is not None:
            # Ensure a repaint when only the message changes:
            self.ui.progressBar_add_shots.repaint()

    def incoming_buffer_loop(self):
        """We use a queue as a buffer for incoming shots. We don't want to hang and not
        respond to a client submitting shots, so we just let shots pile up here until we can get to them.
        The downside to this is that we can't return errors to the client if the shot cannot be added,
        but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
        the experiment on account of errors from the analyis stage, so what's the point of passing errors to it?
        We'll just raise errors here and the user can decide what to do with them."""
        logger = logging.getLogger('lyse.FileBox.incoming')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        # Count of shots added since the progress bar was last reset:
        n_shots_added = 0
        while True:
            try:
                filepaths = []
                # Block until at least one shot arrives:
                filepath = self.incoming_queue.get()
                filepaths.append(filepath)
                if self.incoming_queue.qsize() == 0:
                    # Wait momentarily in case more arrive so we can batch process them:
                    time.sleep(0.1)
                # Batch process to decrease number of dataframe concatenations:
                batch_size = len(self.shots_model.dataframe) // 3 + 1
                while True:
                    try:
                        filepath = self.incoming_queue.get(False)
                    except queue.Empty:
                        break
                    else:
                        filepaths.append(filepath)
                        if len(filepaths) >= batch_size:
                            break
                logger.info('adding:\n%s' % '\n'.join(filepaths))
                if n_shots_added == 0:
                    total_shots = self.incoming_queue.qsize() + len(filepaths)
                    self.set_add_shots_progress(1, total_shots, "reading shot files")

                # Remove duplicates from the list (preserving order) in case the
                # client sent the same filepath multiple times:
                filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
                # We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
                dataframes = []
                indices_of_files_not_found = []
                for i, filepath in enumerate(filepaths):
                    try:
                        dataframe = get_dataframe_from_shot(filepath)
                        dataframes.append(dataframe)
                    except IOError:
                        app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
                        indices_of_files_not_found.append(i)
                    n_shots_added += 1
                    shots_remaining = self.incoming_queue.qsize()
                    total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
                    self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
                self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
                if dataframes:
                    new_row_data = concat_with_padding(*dataframes)
                else:
                    new_row_data = None

                # Do not add the shots that were not found on disk. Reverse
                # loop so that removing an item doesn't change the indices of
                # subsequent removals:
                for i in reversed(indices_of_files_not_found):
                    del filepaths[i]
                if filepaths:
                    self.shots_model.add_files(filepaths, new_row_data)
                    # Let the analysis loop know to look for new shots:
                    self.analysis_pending.set()
                if shots_remaining == 0:
                    # Batch finished; hide the progress bar and reset the count:
                    self.set_add_shots_progress(n_shots_added, total_shots, None)
                    n_shots_added = 0 # reset our counter for the next batch

            except Exception:
                # Keep this incoming loop running at all costs, but make the
                # otherwise uncaught exception visible to the user:
                zprocess.raise_exception_in_thread(sys.exc_info())

    def analysis_loop(self):
        """Worker loop: whenever analysis_pending is set, analyse every
        not-yet-complete shot in order, then run multishot analysis if
        required. Pauses itself (via pause_analysis) on unexpected errors."""
        logger = logging.getLogger('lyse.FileBox.analysis_loop')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        while True:
            try:
                self.analysis_pending.wait()
                self.analysis_pending.clear()
                at_least_one_shot_analysed = False
                while True:
                    if not self.analysis_paused:
                        # Find the first shot that has not finished being analysed:
                        filepath = self.shots_model.get_first_incomplete()
                        if filepath is not None:
                            logger.info('analysing: %s'%filepath)
                            self.do_singleshot_analysis(filepath)
                            at_least_one_shot_analysed = True
                        if filepath is None and at_least_one_shot_analysed:
                            # All shots done and something new was analysed,
                            # so a multishot run is warranted:
                            self.multishot_required = True
                        if filepath is None:
                            break
                        if self.multishot_required:
                            logger.info('doing multishot analysis')
                            self.do_multishot_analysis()
                    else:
                        logger.info('analysis is paused')
                        break
                if self.multishot_required:
                    logger.info('doing multishot analysis')
                    self.do_multishot_analysis()
            except Exception:
                etype, value, tb = sys.exc_info()
                orig_exception = ''.join(traceback.format_exception_only(etype, value))
                message = ('Analysis loop encountered unexpected exception. ' +
                           'This is a bug and should be reported. The analysis ' +
                           'loop is continuing, but lyse may be in an inconsistent state. '
                           'Restart lyse, or continue at your own risk. '
                           'Original exception was:\n\n' + orig_exception)
                # Raise the exception in a thread so we can keep running
                zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
                self.pause_analysis()

    @inmain_decorator()
    def pause_analysis(self):
        # This automatically triggers the slot that sets self.analysis_paused
        self.ui.pushButton_analysis_running.setChecked(True)

    def do_singleshot_analysis(self, filepath):
        """Send one shot to the singleshot routinebox and process its
        progress/done/error messages until analysis of it finishes."""
        # Check the shot file exists before sending it to the singleshot
        # routinebox. This does not guarantee it won't have been deleted by
        # the time the routinebox starts running analysis on it, but by
        # detecting it now we can most of the time avoid the user code
        # coughing exceptions due to the file not existing. Which would also
        # not be a problem, but this way we avoid polluting the outputbox with
        # more errors than necessary.
        if not os.path.exists(filepath):
            self.shots_model.mark_as_deleted_off_disk(filepath)
            return
        self.to_singleshot.put(filepath)
        while True:
            signal, status_percent, updated_data = self.from_singleshot.get()
            for file in updated_data:
                # Update the data for all the rows with new data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            # Update the status percent for the row on which analysis is actually running:
            self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
            if signal == 'done':
                return
            if signal == 'error':
                if not os.path.exists(filepath):
                    # Do not pause if the file has been deleted. An error is
                    # no surprise there:
                    self.shots_model.mark_as_deleted_off_disk(filepath)
                else:
                    self.pause_analysis()
                return
            if signal == 'progress':
                continue
            raise ValueError('invalid signal %s' % str(signal))

    def do_multishot_analysis(self):
        """Trigger the multishot routinebox and process its messages until
        it reports done or error."""
        self.to_multishot.put(None)
        while True:
            signal, _, updated_data = self.from_multishot.get()
            for file in updated_data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            if signal == 'done':
                self.multishot_required = False
                return
            elif signal == 'error':
                self.pause_analysis()
                return
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def on_close_event(self):
    """Handle the main window closing: offer to save a changed configuration.

    Returns True to allow the close to proceed, or False to cancel it.
    """
    save_data = self.get_save_data()
    if self.last_save_data is not None and save_data != self.last_save_data:
        # If only window geometry differs from the last save, silently
        # re-save rather than bothering the user with a dialog:
        if self.only_window_geometry_is_different(save_data, self.last_save_data):
            self.save_configuration(self.last_save_config_file)
            return True
        message = ('Current configuration (which scripts are loaded and other GUI state) '
                   'has changed: save config file \'%s\'?' % self.last_save_config_file)
        reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            # User aborted the quit:
            return False
        if reply == QtWidgets.QMessageBox.Yes:
            self.save_configuration(self.last_save_config_file)
    return True
def on_save_configuration_triggered(self):
    """Save the configuration to the current file, or prompt for one if none
    has been chosen yet."""
    target = self.last_save_config_file
    if target is not None:
        self.save_configuration(target)
        return
    # No config file yet: delegate to 'save as', then enable the menu
    # actions that only make sense once a file exists.
    self.on_save_configuration_as_triggered()
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def on_revert_configuration_triggered(self):
    """Reload GUI state from the last saved config file, after confirmation."""
    save_data = self.get_save_data()
    if self.last_save_data is not None and save_data != self.last_save_data:
        message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
        reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            return
        elif reply == QtWidgets.QMessageBox.Yes:
            self.load_configuration(self.last_save_config_file)
    else:
        # Nothing differs from the saved state:
        error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
    """Prompt the user for a filename and save the configuration there."""
    if self.last_save_config_file is not None:
        default = self.last_save_config_file
    else:
        try:
            default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
        except LabConfig.NoOptionError:
            # Option absent from labconfig: write a sensible default back so
            # the same location is used from now on.
            self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
            default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
        if not os.path.exists(default_path):
            os.makedirs(default_path)
        default = os.path.join(default_path, 'lyse.ini')
    save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
                                                      'Select file to save current lyse configuration',
                                                      default,
                                                      "config files (*.ini)")
    if type(save_file) is tuple:
        # Some Qt bindings return (filename, selected_filter):
        save_file, _ = save_file
    if not save_file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    save_file = os.path.abspath(save_file)
    self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
    """Return True if current_data and old_data agree on everything except
    (possibly) window size/position and splitter positions."""
    geometry_keys = {'window_size', 'window_pos', 'splitter',
                     'splitter_vertical', 'splitter_horizontal'}
    non_geometry_matches = [old_data[key] == value
                            for key, value in current_data.items()
                            if key not in geometry_keys]
    return all(non_geometry_matches)
def get_save_data(self):
    """Collect the current GUI state into a dict of literal-evaluable values.

    Keys cover the loaded routines (with their active checkbox states),
    last-used folders, pause state, and window/splitter geometry.
    """
    save_data = {}
    box = self.singleshot_routinebox
    # (filepath, check state) pairs for each loaded singleshot routine:
    save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
                                       [box.model.item(row, box.COL_ACTIVE).checkState()
                                        for row in range(box.model.rowCount())]))
    save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
    box = self.multishot_routinebox
    save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
                                      [box.model.item(row, box.COL_ACTIVE).checkState()
                                       for row in range(box.model.rowCount())]))
    save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
    save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
    save_data['analysis_paused'] = self.filebox.analysis_paused
    window_size = self.ui.size()
    save_data['window_size'] = (window_size.width(), window_size.height())
    window_pos = self.ui.pos()
    save_data['window_pos'] = (window_pos.x(), window_pos.y())
    # Saved so that geometry is only restored on the same monitor setup:
    save_data['screen_geometry'] = get_screen_geometry()
    save_data['splitter'] = self.ui.splitter.sizes()
    save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
    save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
    return save_data
def save_configuration(self, save_file):
    """Write the current GUI state to save_file and remember it as the
    last-saved state for change detection."""
    lyse_config = LabConfig(save_file)
    save_data = self.get_save_data()
    self.last_save_config_file = save_file
    self.last_save_data = save_data
    for key, value in save_data.items():
        # pprint.pformat yields a repr that ast.literal_eval can parse back
        # in load_configuration:
        lyse_config.set('lyse_state', key, pprint.pformat(value))
def on_load_configuration_triggered(self):
    """Prompt for a config file and load it, offering to save changes first."""
    save_data = self.get_save_data()
    if self.last_save_data is not None and save_data != self.last_save_data:
        message = ('Current configuration (which groups are active/open and other GUI state) '
                   'has changed: save config file \'%s\'?' % self.last_save_config_file)
        reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            return
        if reply == QtWidgets.QMessageBox.Yes:
            self.save_configuration(self.last_save_config_file)
    if self.last_save_config_file is not None:
        default = self.last_save_config_file
    else:
        default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
    file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
                                                 'Select lyse configuration file to load',
                                                 default,
                                                 "config files (*.ini)")
    if type(file) is tuple:
        # Some Qt bindings return (filename, selected_filter):
        file, _ = file
    if not file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    file = os.path.abspath(file)
    self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
    """Restore GUI state from a lyse config file.

    Each stored option is optional: absent options leave the corresponding
    GUI state untouched. Sets self.last_save_data so subsequent change
    detection compares against the freshly loaded state.

    Args:
        filename: path to the .ini config file.
        restore_window_geometry: if False, skip restoring window/splitter
            geometry (used when it was already restored before the window
            was shown).
    """
    self.last_save_config_file = filename
    self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
    lyse_config = LabConfig(filename)

    def get_state(key):
        # Return (found, value) for a 'lyse_state' option; (False, None)
        # when the option or section is absent. This replaces six identical
        # try/except blocks.
        try:
            return True, ast.literal_eval(lyse_config.get('lyse_state', key))
        except (LabConfig.NoOptionError, LabConfig.NoSectionError):
            return False, None

    found, routines = get_state('SingleShot')
    if found:
        self.singleshot_routinebox.add_routines(routines, clear_existing=True)
    found, folder = get_state('LastSingleShotFolder')
    if found:
        self.singleshot_routinebox.last_opened_routine_folder = folder
    found, routines = get_state('MultiShot')
    if found:
        self.multishot_routinebox.add_routines(routines, clear_existing=True)
    found, folder = get_state('LastMultiShotFolder')
    if found:
        self.multishot_routinebox.last_opened_routine_folder = folder
    found, folder = get_state('LastFileBoxFolder')
    if found:
        self.filebox.last_opened_shots_folder = folder
    found, paused = get_state('analysis_paused')
    if found and paused:
        self.filebox.pause_analysis()

    if restore_window_geometry:
        self.load_window_geometry_configuration(filename)

    # Set as self.last_save_data:
    save_data = self.get_save_data()
    self.last_save_data = save_data
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
    """Load only the window geometry from the config file. It's useful to have this
    separate from the rest of load_configuration so that it can be called before the
    window is shown."""
    lyse_config = LabConfig(filename)
    try:
        screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
    except (LabConfig.NoOptionError, LabConfig.NoSectionError):
        pass
    else:
        # Only restore the window size and position, and splitter
        # positions if the screen is the same size/same number of monitors
        # etc. This prevents the window moving off the screen if say, the
        # position was saved when 2 monitors were plugged in but there is
        # only one now, and the splitters may not make sense in light of a
        # different window size, so better to fall back to defaults:
        current_screen_geometry = get_screen_geometry()
        if current_screen_geometry == screen_geometry:
            # Each option is restored independently; missing options are
            # simply skipped:
            try:
                self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
def setup_config(self):
    """Load the lab configuration, requiring the settings lyse depends on."""
    required = {
        "DEFAULT": ["experiment_name"],
        "programs": [
            "text_editor",
            "text_editor_arguments",
            "hdf5_viewer",
            "hdf5_viewer_arguments",
        ],
        "paths": [
            "shared_drive",
            "experiment_shot_storage",
            "analysislib",
        ],
        "ports": ["lyse"],
    }
    self.exp_config = LabConfig(required_params=required)
def connect_signals(self):
    """Connect application-level signals and keyboard shortcuts."""
    if os.name == 'nt':
        # Windows only: fix taskbar icon grouping for new windows.
        self.ui.newWindow.connect(set_win_appusermodel)
    # Keyboard shortcuts:
    QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
    QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
    """Export the filebox dataframe to msgpack files, one per sequence.

    Args:
        choose_folder: if True, the user picks a single destination folder;
            otherwise each sequence's dataframe is saved alongside its
            first shot file.
    """
    df = self.filebox.shots_model.dataframe.copy()
    if len(df) > 0:
        default = self.exp_config.get('paths', 'experiment_shot_storage')
        if choose_folder:
            save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
            if type(save_path) is tuple:
                save_path, _ = save_path
            if not save_path:
                # User cancelled
                return
        sequences = df.sequence.unique()
        for sequence in sequences:
            # One dataframe per sequence, with all-NaN columns dropped:
            sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
            labscript = sequence_df['labscript'].iloc[0]
            # e.g. dataframe_20200101T120000_myscript.msg ([: -3] strips '.py'):
            filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"), labscript[:-3])
            if not choose_folder:
                save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
            # Bug fix: infer_objects() returns a new DataFrame - the result
            # was previously discarded, making the call a no-op.
            sequence_df = sequence_df.infer_objects()
            for col in sequence_df.columns:
                if sequence_df[col].dtype == object:
                    sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
            # NOTE(review): DataFrame.to_msgpack was removed in pandas 1.0;
            # this code requires an older pandas.
            sequence_df.to_msgpack(os.path.join(save_path, filename))
    else:
        error_dialog('Dataframe is empty')
def on_load_dataframe_triggered(self):
    """Load a previously exported dataframe and merge it into the filebox.

    Shot files modified after the dataframe was exported are re-queued for
    fresh analysis rather than loaded from the stale dataframe.
    """
    default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
    file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
                                                 'Select dataframe file to load',
                                                 default,
                                                 "dataframe files (*.msg)")
    if type(file) is tuple:
        file, _ = file
    if not file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    file = os.path.abspath(file)
    # NOTE(review): pandas.read_msgpack was removed in pandas 1.0; this code
    # requires an older pandas.
    df = pandas.read_msgpack(file).sort_values("run time").reset_index()

    # Check for changes in the shot files since the dataframe was exported
    def changed_since(filepath, time):
        # Missing files count as unchanged (they will simply be absent).
        if os.path.isfile(filepath):
            return os.path.getmtime(filepath) > time
        else:
            return False

    filepaths = df["filepath"].tolist()
    changetime_cache = os.path.getmtime(file)
    # Bug fix: np.where() on a lazy map object misbehaves on Python 3 (the
    # iterator becomes a 0-d object array); materialise the booleans first.
    changed = [changed_since(filepath, changetime_cache) for filepath in filepaths]
    need_updating = np.where(changed)[0]
    need_updating = np.sort(need_updating)[::-1]  # sort in descending order to not remove the wrong items with pop
    # Reload the files where changes where made since exporting
    for index in need_updating:
        filepath = filepaths.pop(index)
        self.filebox.incoming_queue.put(filepath)
    df = df.drop(need_updating)
    self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
    """Delete the selected items from whichever pane currently has keyboard
    focus, with an optional confirmation dialog."""
    if self.filebox.ui.tableView.hasFocus():
        self.filebox.shots_model.remove_selection(confirm)
    # The two routine boxes share the same widget layout, so handle them
    # uniformly (order matches the original: singleshot first):
    for routinebox in (self.singleshot_routinebox, self.multishot_routinebox):
        if routinebox.ui.treeView.hasFocus():
            routinebox.remove_selection(confirm)
if __name__ == "__main__":
    # Application entry point: set up logging, the Qt application, the main
    # window, and the analysis web server, then run the event loop.
    logger = setup_logging('lyse')
    labscript_utils.excepthook.set_logger(logger)
    logger.info('\n\n===============starting===============\n')
    qapplication = QtWidgets.QApplication(sys.argv)
    qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
    app = Lyse()
    # Start the web server:
    splash.update_text('starting analysis server')
    server = WebServer(app.port)
    splash.update_text('done')
    # Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
    timer = QtCore.QTimer()
    timer.start(500)
    timer.timeout.connect(lambda: None)  # Let the interpreter run each 500 ms.
    # Upon seeing a ctrl-c interrupt, quit the event loop
    signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
    splash.hide()
    qapplication.exec_()
    # Event loop exited: shut the analysis server down cleanly.
    server.shutdown()
|
proxy_api.py | from flask import Flask, jsonify, make_response
from proxy import Proxy
import multiprocessing
import sqlite3
import time
import yaml
import random
# Module-level Flask application; routes are registered below and the server
# is started by API().
app = Flask(__name__)
def activate():
    """Refresh the proxy database forever, once every 15 seconds.

    Each iteration opens Proxy.db, runs the Proxy scraper/updater against it
    and commits. The connection is closed in a finally block so a failure in
    Proxy.run no longer leaks the sqlite handle (previously conn.close() was
    skipped on any exception).
    """
    print("Inside activate")
    while True:
        conn = sqlite3.connect("Proxy.db")
        try:
            cursor = conn.cursor()
            x = Proxy(country_code='us', config_yaml_path='config.yaml')
            x.run(context={'cursor': cursor})
            conn.commit()
        finally:
            conn.close()
        time.sleep(15)
@app.route('/')
def home():
    """Return the stored US proxy plus user agents as JSON.

    Response shape:
        {"US": {"http": ..., "https": ...},
         "RANDOM_USER_AGENT": <one random agent>,
         "USER_AGENTS": [<all agents>]}

    Returns 503 if the proxy table has no US rows yet (previously this
    raised IndexError on fetchall()[0], producing a 500). The connection is
    closed even if the query fails.
    """
    conn = sqlite3.connect("Proxy.db")
    try:
        cursor = conn.cursor()
        cursor.execute('''SELECT COUNTRY, HTTP, HTTPS
                          FROM PROXY
                          WHERE COUNTRY='US';''')
        rows = cursor.fetchall()
    finally:
        conn.close()
    if not rows:
        # Scraper may not have populated the table yet:
        return make_response(jsonify({'error': 'no US proxy available yet'}), 503)
    result = rows[0]
    with open('config.yaml') as f:
        list_ = yaml.safe_load(f).get('USER_AGENTS')
    response = {result[0]: {'http': result[1], 'https': result[2]},
                'RANDOM_USER_AGENT': random.choice(list_),
                'USER_AGENTS': list_}
    return jsonify(response)
def API():
    """Run the Flask development server (blocking)."""
    print('In API')
    # NOTE(review): debug=True enables the werkzeug debugger/reloader - not
    # suitable for exposure beyond localhost.
    app.run(debug=True)
if __name__ == '__main__':
    # Run the Flask API in a child process, then run the proxy-refresh loop
    # in this process.
    # p = multiprocessing.Process(target=API, args=())
    p = multiprocessing.Process(target=API)
    p.start()
    # Give the server a moment to come up before starting the refresh loop:
    time.sleep(3)
    print('After Flask run')
    activate()
|
webserver.py | #!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import os, socket, requests, re, random
from SocketServer import ThreadingMixIn
from multiprocessing import Process
# Port the HTTP server listens on (binding to 80 requires privileges).
PORT_NUMBER = 80
# Whether a CPU-burning child process is currently running (see /startLoad).
WORKING = False
# Health flag reported by /health and the status page.
HEALTHY = True
# Handle of the CPU-load child process, if one has been started.
PROCESS = None
class myHandler(BaseHTTPRequestHandler):
    """Demo request handler for a GCE instance-group/load-balancing demo.

    Endpoints:
        /makeHealthy, /makeUnhealthy -- toggle the HEALTHY flag
        /startLoad, /stopLoad        -- start/stop a CPU-burning child process
        /health                      -- health check (200 or 500)
        anything else                -- HTML status page

    Note: this is Python 2 code (BaseHTTPServer imports, str wfile writes).
    """

    def get_zone(self):
        # Ask the GCE metadata server for this instance's zone; '' on failure.
        r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/zone',
                         headers={'Metadata-Flavor': 'Google'})
        if r.status_code == 200:
            return re.sub(r'.+zones/(.+)', r'\1', r.text.encode('utf-8'))
        else:
            return ''

    def get_template(self):
        # Ask the GCE metadata server for the instance template name; '' on failure.
        r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/attributes/instance-template',
                         headers={'Metadata-Flavor': 'Google'})
        if r.status_code == 200:
            return re.sub(r'.+instanceTemplates/(.+)', r'\1', r.text.encode('utf-8'))
        else:
            return ''

    def burn_cpu(self):
        # Busy-loop forever; run in a separate process started by /startLoad.
        # (Renamed first parameter from 'x' to the conventional 'self'.)
        while True:
            random.random() * random.random()

    def do_GET(self):
        global WORKING, HEALTHY, PROCESS
        if self.path == '/makeHealthy':
            HEALTHY = True
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return
        if self.path == '/makeUnhealthy':
            HEALTHY = False
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return
        if self.path == '/startLoad':
            if not WORKING:
                PROCESS = Process(target=self.burn_cpu)
                PROCESS.start()
                WORKING = True
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return
        if self.path == '/stopLoad':
            # Bug fix: guard against PROCESS being None when /stopLoad is
            # requested before /startLoad ever ran (was an AttributeError).
            if PROCESS is not None and PROCESS.is_alive():
                PROCESS.terminate()
            WORKING = False
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return
        if self.path == '/health':
            if not HEALTHY:
                self.send_response(500)
                self.end_headers()
            else:
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write('<span style="font-family: verdana; font-weight: bold; font-size: 40px">HTTP/1.0 200 OK</span>')
                self.wfile.close()
            return
        # Default: render the HTML status page.
        HOSTNAME = socket.gethostname().encode('utf-8')
        ZONE = self.get_zone()
        TEMPLATE = self.get_template()
        self.send_response(200 if HEALTHY else 500)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Bug fix in the markup below: the load <span> previously had a
        # doubled quote (class="btn ...""), producing invalid HTML.
        self.wfile.write('''
<html>
<head>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/1.0.0-rc.2/css/materialize.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/1.0.0-rc.2/js/materialize.min.js"></script>
</head>
<body>
<table class="striped">
<colgroup>
<col width="200">
</colgroup>
<tbody>
<tr>
<td>Hostname:</td>
<td><b>''' + HOSTNAME + '''</b></td>
</tr>
<tr>
<td>Zone:</td>
<td><b>''' + ZONE + '''</b></td>
</tr>
<tr>
<td>Template:</td>
<td><b>''' + TEMPLATE + '''</b></td>
</tr>
<tr>
<td>Current load:</td>
<td><span class="btn ''' + ('red' if WORKING else 'green') + '''">''' + ('high' if WORKING else 'none') + '''</span></td>
</tr>
<tr>
<td>Health status:</td>
<td><span class="btn ''' + ('green' if HEALTHY else 'red') + '''">''' + ('healthy' if HEALTHY else 'unhealthy') + '''</span></td>
</tr>
<tr>
<td>Actions:</td>
<td>
<a class="btn blue" href="/''' + ('makeUnhealthy' if HEALTHY else 'makeHealthy') + '''">Make ''' + ('unhealthy' if HEALTHY else 'healthy') + '''</a>
<a class="btn blue" href="/''' + ('stop' if WORKING else 'start') + '''Load">''' + ('Stop' if WORKING else 'Start') + ''' load</a>
<a class="btn blue" href="/health">Check health</a>
</td>
</tr>
</tbody>
</table>
</body>
</html>
''')
        self.wfile.close()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in its own thread."""
    pass
# Entry point: serve until interrupted, then close the listening socket.
try:
    server = ThreadedHTTPServer(('', PORT_NUMBER), myHandler)
    print('Started httpserver on port %s' % PORT_NUMBER)
    server.serve_forever()
except KeyboardInterrupt:
    print('^C received, shutting down the web server')
    server.socket.close()
|
stats_server.py | #!/usr/bin/env python
"""Stats server implementation."""
import BaseHTTPServer
import collections
import json
import logging
import socket
import threading
from grr import config
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
def _JSONMetricValue(metric_info, value):
  """Converts a metric value to a JSON-serializable representation.

  EVENT metrics are expanded into a dict of their sum, count and histogram
  bins; all other metric values are returned unchanged.
  """
  if metric_info.metric_type != stats.MetricType.EVENT:
    return value
  return dict(
      sum=value.sum,
      counter=value.count,
      bins_heights=collections.OrderedDict(value.bins_heights))
def BuildVarzJsonString():
  """Builds Varz JSON string from all stats metrics.

  Returns:
    A JSON string mapping each metric name to {"info": ..., "value": ...}.
  """
  results = {}
  # NOTE(review): iteritems() is Python 2 only; this module targets py2.
  for name, metric_info in stats.STATS.GetAllMetricsMetadata().iteritems():
    info_dict = dict(metric_type=metric_info.metric_type.name)
    if metric_info.value_type:
      info_dict["value_type"] = metric_info.value_type.name
    if metric_info.docstring:
      info_dict["docstring"] = metric_info.docstring
    if metric_info.units:
      info_dict["units"] = metric_info.units.name
    if metric_info.fields_defs:
      # Metric has fields: report one value per field combination, keyed by
      # the colon-joined field values.
      info_dict["fields_defs"] = []
      for field_def in metric_info.fields_defs:
        info_dict["fields_defs"].append((field_def.field_name,
                                         utils.SmartStr(field_def.field_type)))
      value = {}
      all_fields = stats.STATS.GetMetricFields(name)
      for f in all_fields:
        joined_fields = ":".join(utils.SmartStr(fname) for fname in f)
        value[joined_fields] = _JSONMetricValue(metric_info,
                                                stats.STATS.GetMetricValue(
                                                    name, fields=f))
    else:
      # Unfielded metric: a single value.
      value = _JSONMetricValue(metric_info, stats.STATS.GetMetricValue(name))
    results[name] = dict(info=info_dict, value=value)
  encoder = json.JSONEncoder()
  return encoder.encode(results)
class StatsServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Default stats server implementation."""

  def do_GET(self):  # pylint: disable=g-bad-name
    # Serve the metrics registry as JSON at /varz; deny everything else.
    if self.path == "/varz":
      self.send_response(200)
      self.send_header("Content-type", "application/json")
      self.end_headers()
      self.wfile.write(BuildVarzJsonString())
    else:
      self.send_error(403, "Access forbidden: %s" % self.path)
class StatsServer(object):
  """Serves /varz metrics over HTTP on a background daemon thread."""

  def __init__(self, port):
    # First port to try; Start() scans a range beginning here.
    self.port = port

  def Start(self):
    """Start HTTPServer."""
    # Use the same number of available ports as the adminui is using. If we
    # have 10 available for adminui we will need 10 for the stats server.
    adminui_max_port = config.CONFIG.Get("AdminUI.port_max",
                                         config.CONFIG["AdminUI.port"])
    additional_ports = adminui_max_port - config.CONFIG["AdminUI.port"]
    max_port = self.port + additional_ports
    for port in range(self.port, max_port + 1):
      # Make a simple reference implementation WSGI server
      try:
        server = BaseHTTPServer.HTTPServer(("", port), StatsServerHandler)
        break
      except socket.error as e:
        # Keep scanning on address-in-use; any other error is fatal, as is
        # exhausting the port range.
        if e.errno == socket.errno.EADDRINUSE and port < max_port:
          logging.info("Port %s in use, trying %s", port, port + 1)
        else:
          raise
    server_thread = threading.Thread(target=server.serve_forever)
    # Daemon thread: do not block process exit on the stats server.
    server_thread.daemon = True
    server_thread.start()
class StatsServerInit(registry.InitHook):
  """Starts up a varz server after everything is registered."""

  def RunOnce(self):
    """Main method of this registry hook.

    StatsServer implementation may be overriden. If there's a "stats_server"
    module present in grr/local directory then
    grr.local.stats_server.StatsServer implementation will be used instead of
    a default one.
    """
    # Figure out which port to use.
    port = config.CONFIG["Monitoring.http_port"]
    if port != 0:
      logging.info("Starting monitoring server on port %d.", port)
      try:
        # Prefer a site-local override of StatsServer if one is installed:
        # pylint: disable=g-import-not-at-top
        from grr.server.local import stats_server
        # pylint: enable=g-import-not-at-top
        server_obj = stats_server.StatsServer(port)
        logging.debug("Using local StatsServer")
      except ImportError:
        logging.debug("Using default StatsServer")
        server_obj = StatsServer(port)
      server_obj.Start()
    else:
      # Port 0 disables monitoring entirely.
      logging.info("Monitoring server disabled.")
|
sdk_worker.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import functools
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from concurrent import futures
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import metrics_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.data_plane import PeriodicThread
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.runners.worker.worker_status import FnApiWorkerStatusHandler
from apache_beam.runners.worker.worker_status import thread_dump
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
# TODO(BEAM-9372): move this out of the TYPE_CHECKING scope when we drop
# support for python < 3.5.3
from types import TracebackType
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
OptExcInfo = Union[ExcInfo, Tuple[None, None, None]]
from apache_beam.portability.api import endpoints_pb2
from apache_beam.utils.profiler import Profile
# Generic type variables used by the typed containers in this module.
T = TypeVar('T')
_KT = TypeVar('_KT')
_VT = TypeVar('_VT')

_LOGGER = logging.getLogger(__name__)

# This SDK harness will (by default), log a "lull" in processing if it sees no
# transitions in over 5 minutes.
# 5 minutes * 60 seconds * 1000 millis * 1000 micros * 1000 nanoseconds
DEFAULT_LOG_LULL_TIMEOUT_NS = 5 * 60 * 1000 * 1000 * 1000

# Seconds before idle bundle processors are evicted from the cache -
# presumably consumed by BundleProcessorCache (defined later in this file);
# confirm there.
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S = 60

# Full thread dump is performed at most every 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S = 20 * 60

# Full thread dump is performed if the lull is more than 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_LULL_S = 20 * 60

# The number of ProcessBundleRequest instruction ids the BundleProcessorCache
# will remember for not running instructions.
MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS = 1000

# The number of ProcessBundleRequest instruction ids that BundleProcessorCache
# will remember for failed instructions.
MAX_FAILED_INSTRUCTIONS = 10000
class ShortIdCache(object):
  """Thread-safe bidirectional cache mapping MonitoringInfos to "short ids".

  A compact hex id is assigned the first time each distinct MonitoringInfo
  key is seen; the payload-stripped MonitoringInfo is retained so it can be
  looked back up via getInfos.
  """
  def __init__(self):
    # type: () -> None
    self._lock = threading.Lock()
    self._lastShortId = 0
    self._infoKeyToShortId = {}  # type: Dict[FrozenSet, str]
    self._shortIdToInfo = {}  # type: Dict[str, metrics_pb2.MonitoringInfo]

  def getShortId(self, monitoring_info):
    # type: (metrics_pb2.MonitoringInfo) -> str
    """Returns the assigned shortId for a given MonitoringInfo, assigning a
    new one if the MonitoringInfo has not been seen before."""
    key = monitoring_infos.to_key(monitoring_info)
    with self._lock:
      if key not in self._infoKeyToShortId:
        self._lastShortId += 1
        # Hex without the '0x' prefix keeps the ids compact.
        new_id = hex(self._lastShortId)[2:]
        # Store a copy with the (potentially large) payload removed.
        stripped = metrics_pb2.MonitoringInfo()
        stripped.CopyFrom(monitoring_info)
        stripped.ClearField('payload')
        self._infoKeyToShortId[key] = new_id
        self._shortIdToInfo[new_id] = stripped
      return self._infoKeyToShortId[key]

  def getInfos(self, short_ids):
    # type: (Iterable[str]) -> Dict[str, metrics_pb2.MonitoringInfo]
    """Gets the base MonitoringInfo (with payload cleared) for each short ID.

    Throws KeyError if an unassigned short ID is encountered.
    """
    return {short_id: self._shortIdToInfo[short_id] for short_id in short_ids}


SHORT_ID_CACHE = ShortIdCache()
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
def __init__(
    self,
    control_address,  # type: str
    credentials=None,  # type: Optional[grpc.ChannelCredentials]
    worker_id=None,  # type: Optional[str]
    # Caching is disabled by default
    state_cache_size=0,  # type: int
    # time-based data buffering is disabled by default
    data_buffer_time_limit_ms=0,  # type: int
    profiler_factory=None,  # type: Optional[Callable[..., Profile]]
    status_address=None,  # type: Optional[str]
    # Heap dump through status api is disabled by default
    enable_heap_dump=False,  # type: bool
):
  # type: (...) -> None
  """Set up the harness: control channel, data/state factories and caches.

  Blocks up to 60s waiting for the control channel to become ready.
  """
  self._alive = True
  self._worker_index = 0
  self._worker_id = worker_id
  self._state_cache = StateCache(state_cache_size)
  # Unlimited message sizes on the control channel:
  options = [('grpc.max_receive_message_length', -1),
             ('grpc.max_send_message_length', -1)]
  if credentials is None:
    _LOGGER.info('Creating insecure control channel for %s.', control_address)
    self._control_channel = GRPCChannelFactory.insecure_channel(
        control_address, options=options)
  else:
    _LOGGER.info('Creating secure control channel for %s.', control_address)
    self._control_channel = GRPCChannelFactory.secure_channel(
        control_address, credentials, options=options)
  grpc.channel_ready_future(self._control_channel).result(timeout=60)
  _LOGGER.info('Control channel established.')
  # Attach the worker id to every outgoing control-plane call:
  self._control_channel = grpc.intercept_channel(
      self._control_channel, WorkerIdInterceptor(self._worker_id))
  self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
      credentials, self._worker_id, data_buffer_time_limit_ms)
  self._state_handler_factory = GrpcStateHandlerFactory(
      self._state_cache, credentials)
  self._profiler_factory = profiler_factory

  def default_factory(id):
    # type: (str) -> beam_fn_api_pb2.ProcessBundleDescriptor
    # Fetch unknown process bundle descriptors from the runner on demand.
    return self._control_stub.GetProcessBundleDescriptor(
        beam_fn_api_pb2.GetProcessBundleDescriptorRequest(
            process_bundle_descriptor_id=id))

  self._fns = KeyedDefaultDict(default_factory)
  # BundleProcessor cache across all workers.
  self._bundle_processor_cache = BundleProcessorCache(
      state_handler_factory=self._state_handler_factory,
      data_channel_factory=self._data_channel_factory,
      fns=self._fns)
  # Status reporting is best-effort: failure to create the handler must not
  # prevent the harness from starting.
  self._status_handler = None  # type: Optional[FnApiWorkerStatusHandler]
  if status_address:
    try:
      self._status_handler = FnApiWorkerStatusHandler(
          status_address, self._bundle_processor_cache, enable_heap_dump)
    except Exception:
      # Bug fix: previously self._status_handler was left unassigned on this
      # path, causing an AttributeError later in run()'s shutdown sequence.
      traceback_string = traceback.format_exc()
      _LOGGER.warning(
          'Error creating worker status request handler, '
          'skipping status report. Trace back: %s' % traceback_string)

  # TODO(BEAM-8998) use common
  # thread_pool_executor.shared_unbounded_instance() to process bundle
  # progress once dataflow runner's excessive progress polling is removed.
  self._report_progress_executor = futures.ThreadPoolExecutor(max_workers=1)
  self._worker_thread_pool = thread_pool_executor.shared_unbounded_instance()
  self._responses = queue.Queue(
  )  # type: queue.Queue[Union[beam_fn_api_pb2.InstructionResponse, Sentinel]]
  _LOGGER.info('Initializing SDKHarness with unbounded number of workers.')
def run(self):
  # type: () -> None
  """Main loop: consume instructions from the control plane until it closes,
  then drain in-flight work and release all resources."""
  self._control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(
      self._control_channel)
  no_more_work = Sentinel.sentinel

  def get_responses():
    # type: () -> Iterator[beam_fn_api_pb2.InstructionResponse]
    # Generator fed to the bidirectional Control stream; terminates when the
    # shutdown sentinel is queued below.
    while True:
      response = self._responses.get()
      if response is no_more_work:
        return
      yield response

  self._alive = True

  try:
    for work_request in self._control_stub.Control(get_responses()):
      _LOGGER.debug('Got work %s', work_request.instruction_id)
      request_type = work_request.WhichOneof('request')
      # Name spacing the request method with 'request_'. The called method
      # will be like self.request_register(request)
      getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
          work_request)
  finally:
    self._alive = False

  _LOGGER.info('No more requests from control plane')
  _LOGGER.info('SDK Harness waiting for in-flight requests to complete')
  # Wait until existing requests are processed.
  self._worker_thread_pool.shutdown()
  # get_responses may be blocked on responses.get(), but we need to return
  # control to its caller.
  self._responses.put(no_more_work)
  # Stop all the workers and clean all the associated resources
  self._data_channel_factory.close()
  self._state_handler_factory.close()
  self._bundle_processor_cache.shutdown()
  if self._status_handler:
    self._status_handler.close()
  _LOGGER.info('Done consuming work.')
  def _execute(
      self,
      task,  # type: Callable[[], beam_fn_api_pb2.InstructionResponse]
      request  # type: beam_fn_api_pb2.InstructionRequest
  ):
    # type: (...) -> None
    """Run ``task`` under the request's state-sampler scope and enqueue its
    response; on failure enqueue an error response instead of raising."""
    with statesampler.instruction_id(request.instruction_id):
      try:
        response = task()
      except Exception:  # pylint: disable=broad-except
        traceback_string = traceback.format_exc()
        print(traceback_string, file=sys.stderr)
        _LOGGER.error(
            'Error processing instruction %s. Original traceback is\n%s\n',
            request.instruction_id,
            traceback_string)
        # Report the failure back to the runner rather than crashing the
        # harness; the runner decides how to handle the failed instruction.
        response = beam_fn_api_pb2.InstructionResponse(
            instruction_id=request.instruction_id, error=traceback_string)
      self._responses.put(response)
  def _request_register(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # The registration request is handled synchronously on the control
    # thread (no worker-pool submission), so descriptors are registered
    # before any subsequent process_bundle request can reference them.
    self._execute(lambda: self.create_worker().do_instruction(request), request)
  def _request_process_bundle(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Make the instruction id known to the cache before the bundle starts on
    # a worker thread, so progress/split lookups for it do not error out.
    self._bundle_processor_cache.activate(request.instruction_id)
    self._request_execute(request)
  def _request_process_bundle_split(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Split requests share the progress/split dispatch path below.
    self._request_process_bundle_action(request)
  def _request_process_bundle_progress(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Progress requests share the progress/split dispatch path below.
    self._request_process_bundle_action(request)
  def _request_process_bundle_action(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Progress/split requests run on the dedicated single-thread progress
    # executor rather than the (unbounded) worker pool, so they are
    # serialized and never starved by bundle-processing work.
    def task():
      # type: () -> None
      self._execute(
          lambda: self.create_worker().do_instruction(request), request)

    self._report_progress_executor.submit(task)
  def _request_finalize_bundle(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Finalization executes asynchronously on the worker pool like a bundle.
    self._request_execute(request)
def _request_execute(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
def task():
# type: () -> None
self._execute(
lambda: self.create_worker().do_instruction(request), request)
self._worker_thread_pool.submit(task)
_LOGGER.debug(
"Currently using %s threads." % len(self._worker_thread_pool._workers))
  def create_worker(self):
    # type: () -> SdkWorker
    """Create a new ``SdkWorker`` sharing this harness's bundle-processor
    cache, state-cache metrics callback and profiler factory."""
    return SdkWorker(
        self._bundle_processor_cache,
        state_cache_metrics_fn=self._state_cache.get_monitoring_infos,
        profiler_factory=self._profiler_factory)
class BundleProcessorCache(object):
  """A cache for ``BundleProcessor``s.

  ``BundleProcessor`` objects are cached by the id of their
  ``beam_fn_api_pb2.ProcessBundleDescriptor``.

  Attributes:
    fns (dict): A dictionary that maps bundle descriptor IDs to instances of
      ``beam_fn_api_pb2.ProcessBundleDescriptor``.
    state_handler_factory (``StateHandlerFactory``): Used to create state
      handlers to be used by a ``bundle_processor.BundleProcessor`` during
      processing.
    data_channel_factory (``data_plane.DataChannelFactory``)
    active_bundle_processors (dict): A dictionary, indexed by instruction IDs,
      containing ``bundle_processor.BundleProcessor`` objects that are currently
      active processing the corresponding instruction.
    cached_bundle_processors (dict): A dictionary, indexed by bundle processor
      id, of cached ``bundle_processor.BundleProcessor`` that are not currently
      performing processing.
  """
  # Background thread that periodically shuts down idle cached processors;
  # created in _schedule_periodic_shutdown() and stopped in shutdown().
  periodic_shutdown = None  # type: Optional[PeriodicThread]

  def __init__(
      self,
      state_handler_factory,  # type: StateHandlerFactory
      data_channel_factory,  # type: data_plane.DataChannelFactory
      fns  # type: MutableMapping[str, beam_fn_api_pb2.ProcessBundleDescriptor]
  ):
    # type: (...) -> None
    self.fns = fns
    self.state_handler_factory = state_handler_factory
    self.data_channel_factory = data_channel_factory
    # Instruction ids that are known but currently have no running processor
    # (pre-registered via activate() or completed via release()); bounded by
    # MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS, evicted oldest-first.
    self.known_not_running_instruction_ids = collections.OrderedDict(
    )  # type: collections.OrderedDict[str, bool]
    # Instruction ids whose processing failed; bounded by
    # MAX_FAILED_INSTRUCTIONS, evicted oldest-first.
    self.failed_instruction_ids = collections.OrderedDict(
    )  # type: collections.OrderedDict[str, bool]
    self.active_bundle_processors = {
    }  # type: Dict[str, Tuple[str, bundle_processor.BundleProcessor]]
    self.cached_bundle_processors = collections.defaultdict(
        list)  # type: DefaultDict[str, List[bundle_processor.BundleProcessor]]
    # Per-descriptor timestamp of the most recent release(); consulted by the
    # periodic-shutdown thread to find idle processors.
    self.last_access_times = collections.defaultdict(
        float)  # type: DefaultDict[str, float]
    self._schedule_periodic_shutdown()
    self._lock = threading.Lock()

  def register(self, bundle_descriptor):
    # type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
    """Register a ``beam_fn_api_pb2.ProcessBundleDescriptor`` by its id."""
    self.fns[bundle_descriptor.id] = bundle_descriptor

  def activate(self, instruction_id):
    # type: (str) -> None
    """Makes the ``instruction_id`` known to the bundle processor.

    Allows ``lookup`` to return ``None``. Necessary if ``lookup`` can occur
    before ``get``.
    """
    with self._lock:
      self.known_not_running_instruction_ids[instruction_id] = True

  def get(self, instruction_id, bundle_descriptor_id):
    # type: (str, str) -> bundle_processor.BundleProcessor
    """
    Return the requested ``BundleProcessor``, creating it if necessary.

    Moves the ``BundleProcessor`` from the inactive to the active cache.
    """
    with self._lock:
      try:
        # pop() is threadsafe
        processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
        self.active_bundle_processors[
            instruction_id] = bundle_descriptor_id, processor
        try:
          del self.known_not_running_instruction_ids[instruction_id]
        except KeyError:
          # The instruction may have not been pre-registered before execution
          # since activate() may have never been invoked
          pass
        return processor
      except IndexError:
        # No cached processor available for this descriptor; build one below.
        pass

    # Make sure we instantiate the processor while not holding the lock.
    processor = bundle_processor.BundleProcessor(
        self.fns[bundle_descriptor_id],
        self.state_handler_factory.create_state_handler(
            self.fns[bundle_descriptor_id].state_api_service_descriptor),
        self.data_channel_factory)
    with self._lock:
      self.active_bundle_processors[
          instruction_id] = bundle_descriptor_id, processor
      try:
        del self.known_not_running_instruction_ids[instruction_id]
      except KeyError:
        # The instruction may have not been pre-registered before execution
        # since activate() may have never been invoked
        pass
    return processor

  def lookup(self, instruction_id):
    # type: (str) -> Optional[bundle_processor.BundleProcessor]
    """
    Return the requested ``BundleProcessor`` from the cache.

    Will return ``None`` if the BundleProcessor is known but not yet ready. Will
    raise an error if the ``instruction_id`` is not known or has been discarded.
    """
    with self._lock:
      if instruction_id in self.failed_instruction_ids:
        raise RuntimeError(
            'Bundle processing associated with %s has failed. '
            'Check prior failing response for details.' % instruction_id)
      processor = self.active_bundle_processors.get(
          instruction_id, (None, None))[-1]
      if processor:
        return processor
      if instruction_id in self.known_not_running_instruction_ids:
        return None
      raise RuntimeError('Unknown process bundle id %s.' % instruction_id)

  def discard(self, instruction_id):
    # type: (str) -> None
    """
    Marks the instruction id as failed shutting down the ``BundleProcessor``.
    """
    with self._lock:
      self.failed_instruction_ids[instruction_id] = True
      while len(self.failed_instruction_ids) > MAX_FAILED_INSTRUCTIONS:
        self.failed_instruction_ids.popitem(last=False)
      processor = self.active_bundle_processors[instruction_id][1]
      del self.active_bundle_processors[instruction_id]

    # Perform the shutdown while not holding the lock.
    processor.shutdown()

  def release(self, instruction_id):
    # type: (str) -> None
    """
    Release the requested ``BundleProcessor``.

    Resets the ``BundleProcessor`` and moves it from the active to the
    inactive cache.
    """
    with self._lock:
      self.known_not_running_instruction_ids[instruction_id] = True
      while len(self.known_not_running_instruction_ids
                ) > MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS:
        self.known_not_running_instruction_ids.popitem(last=False)
      descriptor_id, processor = (
          self.active_bundle_processors.pop(instruction_id))

    # Make sure that we reset the processor while not holding the lock.
    processor.reset()
    with self._lock:
      self.last_access_times[descriptor_id] = time.time()
      self.cached_bundle_processors[descriptor_id].append(processor)

  def shutdown(self):
    # type: () -> None
    """
    Shutdown all ``BundleProcessor``s in the cache.
    """
    # Stop the periodic-shutdown thread first so it cannot race with the
    # explicit shutdown below.
    if self.periodic_shutdown:
      self.periodic_shutdown.cancel()
      self.periodic_shutdown.join()
      self.periodic_shutdown = None

    # discard() mutates active_bundle_processors, so iterate over a snapshot
    # of the keys.
    for instruction_id in list(self.active_bundle_processors.keys()):
      self.discard(instruction_id)
    for cached_bundle_processors in self.cached_bundle_processors.values():
      BundleProcessorCache._shutdown_cached_bundle_processors(
          cached_bundle_processors)

  def _schedule_periodic_shutdown(self):
    # type: () -> None
    # Start a daemon thread that periodically tears down processors whose
    # descriptor has been idle longer than the shutdown threshold.
    def shutdown_inactive_bundle_processors():
      # type: () -> None
      for descriptor_id, last_access_time in self.last_access_times.items():
        if (time.time() - last_access_time >
            DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S):
          BundleProcessorCache._shutdown_cached_bundle_processors(
              self.cached_bundle_processors[descriptor_id])

    self.periodic_shutdown = PeriodicThread(
        DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S,
        shutdown_inactive_bundle_processors)
    self.periodic_shutdown.daemon = True
    self.periodic_shutdown.start()

  @staticmethod
  def _shutdown_cached_bundle_processors(cached_bundle_processors):
    # type: (List[bundle_processor.BundleProcessor]) -> None
    # Drain the list one element at a time; pop() raising IndexError marks
    # the list empty and is safe against concurrent poppers.
    try:
      while True:
        # pop() is threadsafe
        bundle_processor = cached_bundle_processors.pop()
        bundle_processor.shutdown()
    except IndexError:
      pass
class SdkWorker(object):
  """Executes ``InstructionRequest``s against cached ``BundleProcessor``s."""

  def __init__(
      self,
      bundle_processor_cache,  # type: BundleProcessorCache
      state_cache_metrics_fn=list,  # type: Callable[[], Iterable[metrics_pb2.MonitoringInfo]]
      profiler_factory=None,  # type: Optional[Callable[..., Profile]]
      log_lull_timeout_ns=None,  # type: Optional[int]
  ):
    # type: (...) -> None
    self.bundle_processor_cache = bundle_processor_cache
    # Callable producing extra monitoring infos for the state cache; the
    # default `list` yields no extra metrics.
    self.state_cache_metrics_fn = state_cache_metrics_fn
    self.profiler_factory = profiler_factory
    # Minimum time (ns) a bundle may stay in one execution state before a
    # lull warning with a stack trace is logged.
    self.log_lull_timeout_ns = (
        log_lull_timeout_ns or DEFAULT_LOG_LULL_TIMEOUT_NS)
    self._last_full_thread_dump_secs = 0.0

  def do_instruction(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> beam_fn_api_pb2.InstructionResponse
    """Dispatch ``request`` to the method named after its request type."""
    request_type = request.WhichOneof('request')
    if request_type:
      # E.g. if register is set, this will call self.register(request.register))
      return getattr(self, request_type)(
          getattr(request, request_type), request.instruction_id)
    else:
      raise NotImplementedError

  def register(
      self,
      request,  # type: beam_fn_api_pb2.RegisterRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Registers a set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s.

    This set of ``beam_fn_api_pb2.ProcessBundleDescriptor`` come as part of a
    ``beam_fn_api_pb2.RegisterRequest``, which the runner sends to the SDK
    worker before starting processing to register stages.
    """
    for process_bundle_descriptor in request.process_bundle_descriptor:
      self.bundle_processor_cache.register(process_bundle_descriptor)
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        register=beam_fn_api_pb2.RegisterResponse())

  def process_bundle(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Process one bundle, returning a response with its monitoring data."""
    bundle_processor = self.bundle_processor_cache.get(
        instruction_id, request.process_bundle_descriptor_id)
    try:
      with bundle_processor.state_handler.process_instruction_id(
          instruction_id, request.cache_tokens):
        with self.maybe_profile(instruction_id):
          delayed_applications, requests_finalization = (
              bundle_processor.process_bundle(instruction_id))
          monitoring_infos = bundle_processor.monitoring_infos()
          monitoring_infos.extend(self.state_cache_metrics_fn())
          response = beam_fn_api_pb2.InstructionResponse(
              instruction_id=instruction_id,
              process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
                  residual_roots=delayed_applications,
                  monitoring_infos=monitoring_infos,
                  monitoring_data={
                      SHORT_ID_CACHE.getShortId(info): info.payload
                      for info in monitoring_infos
                  },
                  requires_finalization=requests_finalization))
      # Don't release here if finalize is needed.
      if not requests_finalization:
        self.bundle_processor_cache.release(instruction_id)
      return response
    except:  # pylint: disable=broad-except
      # Don't re-use bundle processors on failure.
      self.bundle_processor_cache.discard(instruction_id)
      raise

  def process_bundle_split(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleSplitRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Attempt to split the referenced in-flight bundle."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction; report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    # Return an empty response if we aren't running. This can happen
    # if the ProcessBundleRequest has not started or already finished.
    process_bundle_split = (
        processor.try_split(request)
        if processor else beam_fn_api_pb2.ProcessBundleSplitResponse())
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        process_bundle_split=process_bundle_split)

  def _log_lull_in_bundle_processor(self, processor):
    # type: (bundle_processor.BundleProcessor) -> None
    # Check the processor's state sampler for a long-running state and log it.
    sampler_info = processor.state_sampler.get_info()
    self._log_lull_sampler_info(sampler_info)

  def _log_lull_sampler_info(self, sampler_info):
    # type: (statesampler.StateSamplerInfo) -> None
    """Warn (with a stack trace when available) if the current execution
    state has not transitioned for longer than ``log_lull_timeout_ns``."""
    if (sampler_info and sampler_info.time_since_transition and
        sampler_info.time_since_transition > self.log_lull_timeout_ns):
      step_name = sampler_info.state_name.step_name
      state_name = sampler_info.state_name.name
      lull_seconds = sampler_info.time_since_transition / 1e9
      state_lull_log = (
          'Operation ongoing for over %.2f seconds in state %s' %
          (lull_seconds, state_name))
      step_name_log = (' in step %s ' % step_name) if step_name else ''

      exec_thread = getattr(sampler_info, 'tracked_thread', None)
      if exec_thread is not None:
        # Grab the stalled thread's current frame for the log message.
        thread_frame = sys._current_frames().get(exec_thread.ident)  # pylint: disable=protected-access
        stack_trace = '\n'.join(
            traceback.format_stack(thread_frame)) if thread_frame else ''
      else:
        stack_trace = '-NOT AVAILABLE-'

      _LOGGER.warning(
          '%s%s without returning. Current Traceback:\n%s',
          state_lull_log,
          step_name_log,
          stack_trace)

      if self._should_log_full_thread_dump(lull_seconds):
        self._log_full_thread_dump()

  def _should_log_full_thread_dump(self, lull_seconds):
    # type: (float) -> bool
    # Full dumps are rate-limited: only for long lulls, and at most once per
    # LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S.
    if lull_seconds < LOG_LULL_FULL_THREAD_DUMP_LULL_S:
      return False
    now = time.time()
    if (self._last_full_thread_dump_secs + LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S
        < now):
      self._last_full_thread_dump_secs = now
      return True
    return False

  def _log_full_thread_dump(self):
    # type: () -> None
    # Dump the stacks of all live threads to the log.
    thread_dump()

  def process_bundle_progress(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleProgressRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Report monitoring data for the referenced in-flight bundle."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction; report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    if processor:
      self._log_lull_in_bundle_processor(processor)
      monitoring_infos = processor.monitoring_infos()
    else:
      # Return an empty response if we aren't running. This can happen
      # if the ProcessBundleRequest has not started or already finished.
      monitoring_infos = []
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
            monitoring_infos=monitoring_infos,
            monitoring_data={
                SHORT_ID_CACHE.getShortId(info): info.payload
                for info in monitoring_infos
            }))

  def monitoring_infos_request(
      self,
      request,  # type: beam_fn_api_pb2.MonitoringInfosMetadataRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Resolve short monitoring-info ids back to their full metadata."""
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        monitoring_infos=beam_fn_api_pb2.MonitoringInfosMetadataResponse(
            monitoring_info=SHORT_ID_CACHE.getInfos(
                request.monitoring_info_id)))

  def finalize_bundle(
      self,
      request,  # type: beam_fn_api_pb2.FinalizeBundleRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Finalize the referenced bundle and release its processor."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction; report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    if processor:
      try:
        finalize_response = processor.finalize_bundle()
        self.bundle_processor_cache.release(request.instruction_id)
        return beam_fn_api_pb2.InstructionResponse(
            instruction_id=instruction_id, finalize_bundle=finalize_response)
      except:  # pylint: disable=broad-except
        # Don't re-use bundle processors on failure.
        self.bundle_processor_cache.discard(request.instruction_id)
        raise
    # We can reach this state if there was an erroneous request to finalize
    # the bundle while it is being initialized or has already been finalized
    # and released.
    raise RuntimeError(
        'Bundle is not in a finalizable state for %s' % instruction_id)

  @contextlib.contextmanager
  def maybe_profile(self, instruction_id):
    # type: (str) -> Iterator[None]
    """Run the wrapped block under a profiler when a factory is configured
    and yields a profiler for this instruction; otherwise a no-op."""
    if self.profiler_factory:
      profiler = self.profiler_factory(instruction_id)
      if profiler:
        with profiler:
          yield
      else:
        yield
    else:
      yield
class StateHandler(with_metaclass(abc.ABCMeta, object)):  # type: ignore[misc]
  """An abstract object representing a ``StateHandler``."""
  @abc.abstractmethod
  def get_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      continuation_token=None  # type: Optional[bytes]
  ):
    # type: (...) -> Tuple[bytes, Optional[bytes]]
    """Return one page of raw state bytes for ``state_key`` plus an optional
    continuation token for fetching the next page."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def append_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      data  # type: bytes
  ):
    # type: (...) -> _Future
    """Append encoded ``data`` to ``state_key``; returns a ``_Future``."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    """Clear all state stored under ``state_key``; returns a ``_Future``."""
    raise NotImplementedError(type(self))
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):  # type: ignore[misc]
  """An abstract factory for creating state handlers."""
  @abc.abstractmethod
  def create_state_handler(self, api_service_descriptor):
    # type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
    """Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def close(self):
    # type: () -> None
    """Close all channels that this factory owns."""
    raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
  """A factory for ``GrpcStateHandler``.

  Caches the created channels by ``state descriptor url``.
  """
  def __init__(self, state_cache, credentials=None):
    # type: (StateCache, Optional[grpc.ChannelCredentials]) -> None
    self._state_handler_cache = {}  # type: Dict[str, CachingStateHandler]
    self._lock = threading.Lock()
    # Returned for descriptors with no state service; errors on any use.
    self._throwing_state_handler = ThrowingStateHandler()
    self._credentials = credentials
    self._state_cache = state_cache

  def create_state_handler(self, api_service_descriptor):
    # type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
    """Return the (possibly cached) state handler for a service descriptor.

    Uses double-checked locking so concurrent callers for the same url share
    a single gRPC channel.
    """
    if not api_service_descriptor:
      return self._throwing_state_handler
    url = api_service_descriptor.url
    if url not in self._state_handler_cache:
      with self._lock:
        if url not in self._state_handler_cache:
          # Options to have no limits (-1) on the size of the messages
          # received or sent over the data plane. The actual buffer size is
          # controlled in a layer above.
          options = [('grpc.max_receive_message_length', -1),
                     ('grpc.max_send_message_length', -1)]
          if self._credentials is None:
            _LOGGER.info('Creating insecure state channel for %s.', url)
            grpc_channel = GRPCChannelFactory.insecure_channel(
                url, options=options)
          else:
            _LOGGER.info('Creating secure state channel for %s.', url)
            grpc_channel = GRPCChannelFactory.secure_channel(
                url, self._credentials, options=options)
          _LOGGER.info('State channel established.')
          # Add workerId to the grpc channel
          grpc_channel = grpc.intercept_channel(
              grpc_channel, WorkerIdInterceptor())
          self._state_handler_cache[url] = CachingStateHandler(
              self._state_cache,
              GrpcStateHandler(
                  beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel)))
    return self._state_handler_cache[url]

  def close(self):
    # type: () -> None
    """Close all cached state handlers and evict the shared state cache."""
    _LOGGER.info('Closing all cached gRPC state handlers.')
    # Iterate values directly; the url keys are not needed here.
    for state_handler in self._state_handler_cache.values():
      state_handler.done()
    self._state_handler_cache.clear()
    self._state_cache.evict_all()
class ThrowingStateHandler(StateHandler):
  """A state handler that errors on any requests."""
  def _fail(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> None
    # Every operation is unsupported without a state ApiServiceDescriptor;
    # all three entry points raise the same error.
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor without '
        'state ApiServiceDescriptor for state key %s.' % state_key)

  def get_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      continuation_token=None  # type: Optional[bytes]
  ):
    # type: (...) -> Tuple[bytes, Optional[bytes]]
    self._fail(state_key)

  def append_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      data  # type: bytes
  ):
    # type: (...) -> _Future
    self._fail(state_key)

  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    self._fail(state_key)
class GrpcStateHandler(StateHandler):
  """A ``StateHandler`` backed by a bidirectional gRPC ``BeamFnState``
  stream, with responses matched to requests by id on a reader thread."""

  # Sentinel queued to terminate the outgoing request stream.
  _DONE = Sentinel.sentinel

  def __init__(self, state_stub):
    # type: (beam_fn_api_pb2_grpc.BeamFnStateStub) -> None
    self._lock = threading.Lock()
    self._state_stub = state_stub
    self._requests = queue.Queue(
    )  # type: queue.Queue[Union[beam_fn_api_pb2.StateRequest, Sentinel]]
    # Maps request ids to futures completed by the response-reader thread.
    self._responses_by_id = {}  # type: Dict[str, _Future]
    self._last_id = 0
    # Exception info captured on the reader thread; re-raised to blocking
    # callers in _blocking_request().
    self._exc_info = None  # type: Optional[OptExcInfo]
    # Thread-local instruction-id binding: this handler is shared across
    # threads that may each be processing a different bundle.
    self._context = threading.local()
    self.start()

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id):
    # type: (str) -> Iterator[None]
    """Bind ``bundle_id`` as the calling thread's current instruction id."""
    if getattr(self._context, 'process_instruction_id', None) is not None:
      raise RuntimeError(
          'Already bound to %r' % self._context.process_instruction_id)
    self._context.process_instruction_id = bundle_id
    try:
      yield
    finally:
      self._context.process_instruction_id = None

  def start(self):
    # type: () -> None
    """Open the bidirectional State stream and start the response reader."""
    self._done = False

    def request_iter():
      # type: () -> Iterator[beam_fn_api_pb2.StateRequest]
      # Feeds queued requests to the stream until the sentinel arrives or
      # done() is called.
      while True:
        request = self._requests.get()
        if request is self._DONE or self._done:
          break
        yield request

    responses = self._state_stub.State(request_iter())

    def pull_responses():
      # type: () -> None
      try:
        for response in responses:
          # Popping an item from a dictionary is atomic in cPython
          future = self._responses_by_id.pop(response.id)
          future.set(response)
          if self._done:
            break
      except:  # pylint: disable=bare-except
        # Record the failure so _blocking_request() can surface it.
        self._exc_info = sys.exc_info()
        raise

    reader = threading.Thread(target=pull_responses, name='read_state')
    reader.daemon = True
    reader.start()

  def done(self):
    # type: () -> None
    # Flag shutdown and unblock request_iter() via the sentinel.
    self._done = True
    self._requests.put(self._DONE)

  def get_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      continuation_token=None  # type: Optional[bytes]
  ):
    # type: (...) -> Tuple[bytes, Optional[bytes]]
    """Fetch one page of state synchronously."""
    response = self._blocking_request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            get=beam_fn_api_pb2.StateGetRequest(
                continuation_token=continuation_token)))
    return response.get.data, response.get.continuation_token

  def append_raw(
      self,
      state_key,  # type: Optional[beam_fn_api_pb2.StateKey]
      data  # type: bytes
  ):
    # type: (...) -> _Future
    """Send an append request; returns a future for its response."""
    return self._request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            append=beam_fn_api_pb2.StateAppendRequest(data=data)))

  def clear(self, state_key):
    # type: (Optional[beam_fn_api_pb2.StateKey]) -> _Future
    """Send a clear request; returns a future for its response."""
    return self._request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key, clear=beam_fn_api_pb2.StateClearRequest()))

  def _request(self, request):
    # type: (beam_fn_api_pb2.StateRequest) -> _Future[beam_fn_api_pb2.StateResponse]
    # Assign a unique id and the caller's instruction id, register a future
    # for the response, and enqueue the request for the stream.
    request.id = self._next_id()
    request.instruction_id = self._context.process_instruction_id
    # Adding a new item to a dictionary is atomic in cPython
    self._responses_by_id[request.id] = future = _Future[
        beam_fn_api_pb2.StateResponse]()
    # Request queue is thread-safe
    self._requests.put(request)
    return future

  def _blocking_request(self, request):
    # type: (beam_fn_api_pb2.StateRequest) -> beam_fn_api_pb2.StateResponse
    # Poll with a timeout so reader-thread failures and shutdown are noticed
    # instead of blocking forever on a response that will never arrive.
    req_future = self._request(request)
    while not req_future.wait(timeout=1):
      if self._exc_info:
        t, v, tb = self._exc_info
        raise_(t, v, tb)
      elif self._done:
        raise RuntimeError()
    response = req_future.get()
    if response.error:
      raise RuntimeError(response.error)
    else:
      return response

  def _next_id(self):
    # type: () -> str
    with self._lock:
      # Use a lock here because this GrpcStateHandler is shared across all
      # requests which have the same process bundle descriptor. State requests
      # can concurrently access this section if a Runner uses threads / workers
      # (aka "parallelism") to send data to this SdkHarness and its workers.
      self._last_id += 1
      request_id = self._last_id
    return str(request_id)
class CachingStateHandler(object):
  """ A State handler which retrieves and caches state.
 If caching is activated, caches across bundles using a supplied cache token.
 If activated but no cache token is supplied, caching is done at the bundle
 level.
 """
  def __init__(
      self,
      global_state_cache,  # type: StateCache
      underlying_state  # type: StateHandler
  ):
    # type: (...) -> None
    self._underlying = underlying_state
    self._state_cache = global_state_cache
    # Thread-local cache tokens: each worker thread processes a different
    # bundle through this shared handler.
    self._context = threading.local()

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id, cache_tokens):
    # type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
    """Bind the bundle's cache tokens for the duration of its processing."""
    if getattr(self._context, 'user_state_cache_token', None) is not None:
      raise RuntimeError(
          'Cache tokens already set to %s' %
          self._context.user_state_cache_token)
    self._context.side_input_cache_tokens = {}
    user_state_cache_token = None
    for cache_token_struct in cache_tokens:
      if cache_token_struct.HasField("user_state"):
        # There should only be one user state token present
        assert not user_state_cache_token
        user_state_cache_token = cache_token_struct.token
      elif cache_token_struct.HasField("side_input"):
        self._context.side_input_cache_tokens[
            cache_token_struct.side_input.transform_id,
            cache_token_struct.side_input.
            side_input_id] = cache_token_struct.token
    # TODO: Consider a two-level cache to avoid extra logic and locking
    # for items cached at the bundle level.
    self._context.bundle_cache_token = bundle_id
    try:
      self._state_cache.initialize_metrics()
      self._context.user_state_cache_token = user_state_cache_token
      with self._underlying.process_instruction_id(bundle_id):
        yield
    finally:
      # Always clear the tokens so the next bundle on this thread starts
      # from a clean slate.
      self._context.side_input_cache_tokens = {}
      self._context.user_state_cache_token = None
      self._context.bundle_cache_token = None

  def blocking_get(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      coder,  # type: coder_impl.CoderImpl
  ):
    # type: (...) -> Iterable[Any]
    """Return the decoded state for ``state_key``, consulting the cache."""
    cache_token = self._get_cache_token(state_key)
    if not cache_token:
      # Cache disabled / no cache token. Can't do a lookup/store in the cache.
      # Fall back to lazily materializing the state, one element at a time.
      return self._lazy_iterator(state_key, coder)
    # Cache lookup
    cache_state_key = self._convert_to_cache_key(state_key)
    cached_value = self._state_cache.get(cache_state_key, cache_token)
    if cached_value is None:
      # Cache miss, need to retrieve from the Runner
      # Further size estimation or the use of the continuation token on the
      # runner side could fall back to materializing one item at a time.
      # https://jira.apache.org/jira/browse/BEAM-8297
      materialized = cached_value = (
          self._partially_cached_iterable(state_key, coder))
      if isinstance(materialized, (list, self.ContinuationIterable)):
        self._state_cache.put(cache_state_key, cache_token, materialized)
      else:
        _LOGGER.error(
            "Uncacheable type %s for key %s. Not caching.",
            materialized,
            state_key)
    return cached_value

  def extend(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      coder,  # type: coder_impl.CoderImpl
      elements,  # type: Iterable[Any]
  ):
    # type: (...) -> _Future
    """Append ``elements`` to the state, updating the cache when possible."""
    cache_token = self._get_cache_token(state_key)
    if cache_token:
      # Update the cache
      cache_key = self._convert_to_cache_key(state_key)
      cached_value = self._state_cache.get(cache_key, cache_token)
      # Keep in mind that the state for this key can be evicted
      # while executing this function. Either read or write to the cache
      # but never do both here!
      if cached_value is None:
        # We have never cached this key before, first retrieve state
        cached_value = self.blocking_get(state_key, coder)
      # Just extend the already cached value
      if isinstance(cached_value, list):
        # Materialize provided iterable to ensure reproducible iterations,
        # here and when writing to the state handler below.
        elements = list(elements)
        # The state is fully cached and can be extended
        cached_value.extend(elements)
      elif isinstance(cached_value, self.ContinuationIterable):
        # The state is too large to be fully cached (continuation token used),
        # only the first part is cached, the rest if enumerated via the runner.
        pass
      else:
        # When a corrupt value made it into the cache, we have to fail.
        raise Exception("Unexpected cached value: %s" % cached_value)
    # Write to state handler
    out = coder_impl.create_OutputStream()
    for element in elements:
      coder.encode_to_stream(element, out, True)
    return self._underlying.append_raw(state_key, out.get())

  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    """Clear the state for ``state_key`` in both cache and runner."""
    cache_token = self._get_cache_token(state_key)
    if cache_token:
      cache_key = self._convert_to_cache_key(state_key)
      self._state_cache.clear(cache_key, cache_token)
    return self._underlying.clear(state_key)

  def done(self):
    # type: () -> None
    """Shut down the underlying state handler."""
    self._underlying.done()

  def _lazy_iterator(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      coder,  # type: coder_impl.CoderImpl
      continuation_token=None  # type: Optional[bytes]
  ):
    # type: (...) -> Iterator[Any]
    """Materializes the state lazily, one element at a time.
 :return A generator which returns the next element if advanced.
 """
    while True:
      data, continuation_token = (
          self._underlying.get_raw(state_key, continuation_token))
      input_stream = coder_impl.create_InputStream(data)
      while input_stream.size() > 0:
        yield coder.decode_from_stream(input_stream, True)
      if not continuation_token:
        break

  def _get_cache_token(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> Optional[bytes]
    # Pick the applicable cache token: cross-bundle user-state or side-input
    # tokens when supplied, otherwise the per-bundle token; None disables
    # caching entirely.
    if not self._state_cache.is_cache_enabled():
      return None
    elif state_key.HasField('bag_user_state'):
      if self._context.user_state_cache_token:
        return self._context.user_state_cache_token
      else:
        return self._context.bundle_cache_token
    elif state_key.WhichOneof('type').endswith('_side_input'):
      side_input = getattr(state_key, state_key.WhichOneof('type'))
      return self._context.side_input_cache_tokens.get(
          (side_input.transform_id, side_input.side_input_id),
          self._context.bundle_cache_token)
    return None

  def _partially_cached_iterable(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      coder  # type: coder_impl.CoderImpl
  ):
    # type: (...) -> Iterable[Any]
    """Materialized the first page of data, concatenated with a lazy iterable
 of the rest, if any.
 """
    data, continuation_token = self._underlying.get_raw(state_key, None)
    head = []
    input_stream = coder_impl.create_InputStream(data)
    while input_stream.size() > 0:
      head.append(coder.decode_from_stream(input_stream, True))

    if not continuation_token:
      return head
    else:
      return self.ContinuationIterable(
          head,
          functools.partial(
              self._lazy_iterator, state_key, coder, continuation_token))

  class ContinuationIterable(Generic[T]):
    # An iterable whose head is materialized and whose tail is fetched
    # lazily from the runner each time it is iterated.
    def __init__(self, head, continue_iterator_fn):
      # type: (Iterable[T], Callable[[], Iterable[T]]) -> None
      self.head = head
      self.continue_iterator_fn = continue_iterator_fn

    def __iter__(self):
      # type: () -> Iterator[T]
      for item in self.head:
        yield item
      for item in self.continue_iterator_fn():
        yield item

  @staticmethod
  def _convert_to_cache_key(state_key):
    # type: (beam_fn_api_pb2.StateKey) -> bytes
    return state_key.SerializeToString()
class _Future(Generic[T]):
  """A minimal future used to implement blocking requests.

  Unlike concurrent.futures.Future it carries no error state: ``set``
  publishes a value and wakes all waiters; ``get`` blocks until then.
  """
  def __init__(self):
    # type: () -> None
    self._event = threading.Event()

  def wait(self, timeout=None):
    # type: (Optional[float]) -> bool
    """Block until a value is set; returns False if the timeout elapsed."""
    return self._event.wait(timeout)

  def get(self, timeout=None):
    # type: (Optional[float]) -> T
    """Return the value, blocking until it is available.

    Raises LookupError if no value arrives within ``timeout``.
    """
    if not self.wait(timeout):
      raise LookupError()
    return self._value

  def set(self, value):
    # type: (T) -> None
    # Store the value before signalling so waiters always observe it.
    self._value = value
    self._event.set()

  @classmethod
  def done(cls):
    # type: () -> _Future[None]
    """Return a shared, already-completed future (created on first use)."""
    if not hasattr(cls, 'DONE'):
      done_future = _Future[None]()
      done_future.set(None)
      cls.DONE = done_future  # type: ignore[attr-defined]
    return cls.DONE  # type: ignore[attr-defined]
class KeyedDefaultDict(DefaultDict[_KT, _VT]):
  """A defaultdict whose factory receives the missing key as an argument.

  The standard defaultdict calls ``default_factory()`` with no arguments;
  this variant calls ``default_factory(key)`` so the generated default can
  depend on the key itself.
  """
  if TYPE_CHECKING:
    # we promise to only use a subset of what DefaultDict can do
    def __init__(self, default_factory):
      # type: (Callable[[_KT], _VT]) -> None
      pass

  def __missing__(self, key):
    # type: (_KT) -> _VT
    # typing: default_factory takes an arg, but the base class does not
    self[key] = self.default_factory(key)  # type: ignore # pylint: disable=E1137
    return self[key]
|
servers.py | """
Starting in CherryPy 3.1, cherrypy.server is implemented as an
:ref:`Engine Plugin<plugins>`. It's an instance of
:class:`cherrypy._cpserver.Server`, which is a subclass of
:class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class
is designed to control other servers, as well.
Multiple servers/ports
======================
If you need to start more than one HTTP server (to serve on multiple ports, or
protocols, etc.), you can manually register each one and then start them all
with engine.start::
s1 = ServerAdapter(cherrypy.engine, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(cherrypy.engine,
another.HTTPServer(host='127.0.0.1',
SSL=True))
s1.subscribe()
s2.subscribe()
cherrypy.engine.start()
.. index:: SCGI
FastCGI/SCGI
============
There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
:mod:`cherrypy.process.servers`. To start an fcgi server, for example,
wrap an instance of it in a ServerAdapter::
addr = ('0.0.0.0', 4000)
f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr)
s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr)
s.subscribe()
The :doc:`cherryd</deployguide/cherryd>` startup script will do the above for
you via its `-f` flag.
Note that you need to download and install `flup <http://trac.saddi.com/flup>`_
yourself, whether you use ``cherryd`` or not.
.. _fastcgi:
.. index:: FastCGI
FastCGI
-------
A very simple setup lets your CherryPy application run with FastCGI.
You just need the flup library,
plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
CherryPy code
^^^^^^^^^^^^^
hello.py::
#!/usr/bin/python
import cherrypy
class HelloWorld:
\"""Sample request handler class.\"""
def index(self):
return "Hello world!"
index.exposed = True
cherrypy.tree.mount(HelloWorld())
# CherryPy autoreload must be disabled for the flup server to work
cherrypy.config.update({'engine.autoreload.on':False})
Then run :doc:`/deployguide/cherryd` with the '-f' arg::
cherryd -c <myconfig> -d -f -i hello.py
Apache
^^^^^^
At the top level in httpd.conf::
FastCgiIpcDir /tmp
FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4
And inside the relevant VirtualHost section::
# FastCGI config
AddHandler fastcgi-script .fcgi
ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1
Lighttpd
^^^^^^^^
For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
active within ``server.modules``. Then, within your ``$HTTP["host"]``
directive, configure your fastcgi script like the following::
$HTTP["url"] =~ "" {
fastcgi.server = (
"/" => (
"script.fcgi" => (
"bin-path" => "/path/to/your/script.fcgi",
"socket" => "/tmp/script.sock",
"check-local" => "disable",
"disable-time" => 1,
"min-procs" => 1,
"max-procs" => 1, # adjust as needed
),
),
)
} # end of $HTTP["url"] =~ "^/"
Please see `Lighttpd FastCGI Docs
<http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for
an explanation of the possible configuration options.
"""
import sys
import time
import warnings
class ServerAdapter(object):
    """Adapter for an HTTP server.

    If you need to start more than one HTTP server (to serve on multiple
    ports, or protocols, etc.), you can manually register each one and then
    start them all with bus.start::

        s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
        s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
        s1.subscribe()
        s2.subscribe()
        bus.start()
    """

    def __init__(self, bus, httpserver=None, bind_addr=None):
        # bus: the engine/bus publishing 'start' and 'stop' channels.
        # httpserver: wrapped server; must expose start()/stop() and a
        #   'ready' attribute (duck-typed; see wait()).
        # bind_addr: a (host, port) tuple, a socket filename, or None.
        self.bus = bus
        self.httpserver = httpserver
        self.bind_addr = bind_addr
        # Holds any exception raised in the server thread, for re-raise
        # from wait().
        self.interrupt = None
        self.running = False

    def subscribe(self):
        """Register this adapter's start/stop handlers on the bus."""
        self.bus.subscribe('start', self.start)
        self.bus.subscribe('stop', self.stop)

    def unsubscribe(self):
        """Unregister this adapter's start/stop handlers from the bus."""
        self.bus.unsubscribe('start', self.start)
        self.bus.unsubscribe('stop', self.stop)

    def start(self):
        """Start the HTTP server."""
        if self.bind_addr is None:
            on_what = "unknown interface (dynamic?)"
        elif isinstance(self.bind_addr, tuple):
            on_what = self._get_base()
        else:
            on_what = "socket file: %s" % self.bind_addr

        if self.running:
            self.bus.log("Already serving on %s" % on_what)
            return

        self.interrupt = None
        if not self.httpserver:
            raise ValueError("No HTTP server has been created.")

        # Start the httpserver in a new thread.
        if isinstance(self.bind_addr, tuple):
            # Make sure a previous incarnation has actually released the port.
            wait_for_free_port(*self.bind_addr)

        import threading
        t = threading.Thread(target=self._start_http_thread)
        t.setName("HTTPServer " + t.getName())
        t.start()

        # Block until the server reports it is ready (or errored out).
        self.wait()
        self.running = True
        self.bus.log("Serving on %s" % on_what)
    # Run after most other 'start' listeners (larger number = later).
    start.priority = 75

    def _get_base(self):
        """Return the base URL ('scheme://host[:port]') for self.bind_addr."""
        if not self.httpserver:
            return ''
        host, port = self.bind_addr
        if getattr(self.httpserver, 'ssl_certificate', None) or \
           getattr(self.httpserver, 'ssl_adapter', None):
            scheme = "https"
            # Only append the port when it differs from the scheme default.
            if port != 443:
                host += ":%s" % port
        else:
            scheme = "http"
            if port != 80:
                host += ":%s" % port

        return "%s://%s" % (scheme, host)

    def _start_http_thread(self):
        """HTTP servers MUST be running in new threads, so that the
        main thread persists to receive KeyboardInterrupt's. If an
        exception is raised in the httpserver's thread then it's
        trapped here, and the bus (and therefore our httpserver)
        are shut down.
        """
        try:
            self.httpserver.start()
        except KeyboardInterrupt:
            self.bus.log("<Ctrl-C> hit: shutting down HTTP server")
            self.interrupt = sys.exc_info()[1]
            self.bus.exit()
        except SystemExit:
            self.bus.log("SystemExit raised: shutting down HTTP server")
            self.interrupt = sys.exc_info()[1]
            self.bus.exit()
            raise
        except:
            # Record the exception so wait() can re-raise it in the caller.
            self.interrupt = sys.exc_info()[1]
            self.bus.log("Error in HTTP server: shutting down",
                         traceback=True, level=40)
            self.bus.exit()
            raise

    def wait(self):
        """Wait until the HTTP server is ready to receive requests."""
        # Poll the server's 'ready' flag; self.interrupt holds any exception
        # raised in the server thread, re-raised here in the caller's thread.
        while not getattr(self.httpserver, "ready", False):
            if self.interrupt:
                raise self.interrupt
            time.sleep(.1)

        # Wait for port to be occupied
        if isinstance(self.bind_addr, tuple):
            host, port = self.bind_addr
            wait_for_occupied_port(host, port)

    def stop(self):
        """Stop the HTTP server."""
        if self.running:
            # stop() MUST block until the server is *truly* stopped.
            self.httpserver.stop()
            # Wait for the socket to be truly freed.
            if isinstance(self.bind_addr, tuple):
                wait_for_free_port(*self.bind_addr)
            self.running = False
            self.bus.log("HTTP Server %s shut down" % self.httpserver)
        else:
            self.bus.log("HTTP Server %s already shut down" % self.httpserver)
    # Run before most other 'stop' listeners (smaller number = earlier).
    stop.priority = 25

    def restart(self):
        """Restart the HTTP server."""
        self.stop()
        self.start()
class FlupCGIServer(object):
    """Adapter for a flup.server.cgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        """Record the constructor arguments; the real server is built lazily
        in start()."""
        self.args = args
        self.kwargs = kwargs
        self.ready = False

    def start(self):
        """Start the CGI server."""
        # Instantiate lazily: WSGIServer.__init__ spawns a threadpool, and
        # doing that before daemonization would break the daemon fork.
        from flup.server.cgi import WSGIServer

        self.cgiserver = WSGIServer(*self.args, **self.kwargs)
        self.ready = True
        self.cgiserver.run()

    def stop(self):
        """Stop the CGI server by clearing the ready flag."""
        self.ready = False
class FlupFCGIServer(object):
    """Adapter for a flup.server.fcgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        """Record the constructor arguments for the FCGI server.

        A dynamic server (no explicit bindAddress) inherits its socket from
        the parent process via socket.fromfd, so it is rejected up front on
        platforms lacking that call.
        """
        if kwargs.get('bindAddress', None) is None:
            import socket
            if not hasattr(socket, 'fromfd'):
                raise ValueError(
                    'Dynamic FCGI server not available on this platform. '
                    'You must use a static or external one by providing a '
                    'legal bindAddress.')
        self.args = args
        self.kwargs = kwargs
        self.ready = False

    def start(self):
        """Start the FCGI server."""
        # Deferred instantiation: the server's __init__ starts a threadpool,
        # which must not happen before daemonize.
        from flup.server.fcgi import WSGIServer

        server = WSGIServer(*self.args, **self.kwargs)
        # Workaround for a flup bug on Windows: _restoreSignalHandlers
        # iterates _oldSIGs, which never gets created if signal handler
        # installation is skipped.  Disable installation and supply the
        # attribute the teardown path expects.
        # (TODO: report this bug upstream to flup.)
        server._installSignalHandlers = lambda: None
        server._oldSIGs = []
        self.fcgiserver = server
        self.ready = True
        server.run()

    def stop(self):
        """Stop the FCGI server."""
        # Forcibly break the main event loop...
        self.fcgiserver._keepGoing = False
        # ...then let the worker threads drain by capping the pool's
        # spare-thread count at the current idle count.
        self.fcgiserver._threadPool.maxSpare = (
            self.fcgiserver._threadPool._idleCount)
        self.ready = False
class FlupSCGIServer(object):
    """Adapter for a flup.server.scgi.WSGIServer."""

    def __init__(self, *args, **kwargs):
        """Record the constructor arguments; the server is built in start()."""
        self.args = args
        self.kwargs = kwargs
        self.ready = False

    def start(self):
        """Start the SCGI server."""
        # Deferred instantiation: the server's __init__ starts a threadpool,
        # which must not happen before daemonize.
        from flup.server.scgi import WSGIServer

        server = WSGIServer(*self.args, **self.kwargs)
        # Workaround for a flup bug on Windows: _restoreSignalHandlers
        # iterates _oldSIGs, which never gets created if signal handler
        # installation is skipped.  Disable installation and supply the
        # attribute the teardown path expects.
        # (TODO: report this bug upstream to flup.)
        server._installSignalHandlers = lambda: None
        server._oldSIGs = []
        self.scgiserver = server
        self.ready = True
        server.run()

    def stop(self):
        """Stop the SCGI server."""
        self.ready = False
        # Forcibly stop the main event loop...
        self.scgiserver._keepGoing = False
        # ...and force all worker threads to die off.
        self.scgiserver._threadPool.maxSpare = 0
def client_host(server_host):
    """Return the host on which a client can connect to the given listener."""
    # Wildcard listen addresses are not connectable; map them to loopback.
    # '::0' and '::0.0.0.0' are non-canonical but common spellings of
    # IN6ADDR_ANY; '0.0.0.0' is INADDR_ANY.
    wildcards = {
        '0.0.0.0': '127.0.0.1',
        '::': '::1',
        '::0': '::1',
        '::0.0.0.0': '::1',
    }
    return wildcards.get(server_host, server_host)
def check_port(host, port, timeout=1.0):
    """Raise an error if the given port is not free on the given host.

    A connection attempt is made for each address family the host resolves
    to; if any attempt succeeds, something is already listening there.

    @raise ValueError: if host is '' or None.
    @raise IOError: if the port is already in use.
    """
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    host = client_host(host)
    port = int(port)

    import socket

    # AF_INET or AF_INET6 socket
    # Get the correct address family for our host (allows IPv6 addresses)
    try:
        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)
    except socket.gaierror:
        # Resolution failed (e.g. no DNS); synthesize a best-guess entry.
        if ':' in host:
            info = [(
                socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0)
            )]
        else:
            info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]

    for res in info:
        af, socktype, proto, canonname, sa = res
        s = None
        try:
            s = socket.socket(af, socktype, proto)
            # See http://groups.google.com/group/cherrypy-users/
            # browse_frm/thread/bbfe5eb39c904fe0
            s.settimeout(timeout)
            # Bug fix: connect to the resolved sockaddr, not (host, port).
            # The latter forced a redundant lookup and broke IPv6 addresses
            # whose sockaddr carries flowinfo/scope-id components.
            s.connect(sa)
        except socket.error:
            # Connection refused/timed out: the port is free on this address.
            pass
        else:
            raise IOError("Port %s is in use on %s; perhaps the previous "
                          "httpserver did not shut down properly." %
                          (repr(port), repr(host)))
        finally:
            # Bug fix: always release the socket, even when an unexpected
            # exception (not socket.error) escapes the connect attempt.
            if s:
                s.close()
# Feel free to increase these defaults on slow systems:
free_port_timeout = 0.1
occupied_port_timeout = 1.0


def wait_for_free_port(host, port, timeout=None):
    """Wait for the specified port to become free (drop requests)."""
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    if timeout is None:
        # we are expecting a free port, so reduce the timeout
        timeout = free_port_timeout

    for _ in range(50):
        try:
            check_port(host, port, timeout=timeout)
        except IOError:
            # Still occupied; give the old server thread time to free it.
            time.sleep(timeout)
        else:
            return

    raise IOError("Port %r not free on %r" % (port, host))
def wait_for_occupied_port(host, port, timeout=None):
    """Wait for the specified port to become active (receive requests)."""
    if not host:
        raise ValueError("Host values of '' or None are not allowed.")
    if timeout is None:
        timeout = occupied_port_timeout

    for _ in range(50):
        try:
            check_port(host, port, timeout=timeout)
        except IOError:
            # check_port raised: something is listening, so we are done.
            return
        # Nothing listening yet; wait and retry.
        time.sleep(timeout)

    if host == client_host(host):
        raise IOError("Port %r not bound on %r" % (port, host))

    # On systems where a loopback interface is not available and the
    # server is bound to all interfaces, it's difficult to determine
    # whether the server is in fact occupying the port. In this case,
    # just issue a warning and move on. See issue #1100.
    msg = "Unable to verify that the server is bound on %r" % port
    warnings.warn(msg)
|
RobotHandArduino.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 17:40:25 2021
@author: DiegoDiazGarciaDev
"""
import cv2
from tensorflow.keras.models import load_model
import requests
import threading
import numpy as np
from tensorflow.keras.preprocessing import image
# Flag in case we are going to use our ESP8266 connected to the robot hand.
ESP8266 = True

# We load the CNN model.
# NOTE: loaded at import time; the script fails immediately if the file is missing.
new_model = load_model('./model/hand_100epochs.h5')

# This background will be a global variable that we update through a few functions.
background = None

# Start with a halfway point between 0 and 1 of accumulated weight.
accumulated_weight = 0.5

# Manually set up our ROI (region of interest) for grabbing the hand,
# expressed in pixel coordinates of the camera frame.
# Feel free to change these. I just chose the top right corner for filming.
roi_top = 20
roi_bottom = 300
roi_right = 300
roi_left = 600
#roi_top, roi_right, roi_bottom, roi_left = 110, 350, 325, 590
def calc_accum_avg(frame, accumulated_weight):
    '''
    Given a frame and a previous accumulated weight, compute the weighted
    average of the image passed in and fold it into the global background.
    '''
    # Grab the background
    global background

    # For first time, create the background from a copy of the frame.
    if background is None:
        background = frame.copy().astype("float")
        return None

    # Compute the weighted average, accumulate it and update the background
    # in place (cv2.accumulateWeighted mutates its second argument).
    cv2.accumulateWeighted(frame, background, accumulated_weight)
def segment(frame, threshold=25):
    """Segment the hand region from a grayscale frame by differencing
    against the accumulated background.

    Returns a (thresholded_image, hand_contour) tuple, or None when no
    contour is found.
    """
    global background

    # Calculates the absolute difference between the background and the passed in frame
    diff = cv2.absdiff(background.astype("uint8"), frame)

    # Apply a threshold to the image so we can grab the foreground
    # We only need the threshold, so we will throw away the first item in the tuple with an underscore _
    _ , thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)

    # Grab the external contours from the image
    # Again, only grabbing what we need here and throwing away the rest
    # DDG: I had to remove the first returned element (image) — OpenCV >= 4
    # returns (contours, hierarchy) instead of (image, contours, hierarchy).
    contours, hierarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # If length of contours list is 0, then we didn't grab any contours!
    if len(contours) == 0:
        return None
    else:
        # Given the way we are using the program, the largest external contour should be the hand (largest by area)
        # This will be our segment
        hand_segment = max(contours, key=cv2.contourArea)

        # Return both the hand segment and the thresholded hand image
        return (thresholded, hand_segment)
# Sentinel meaning "no gesture sent yet" (any value outside {0, 1, 2}).
lastPrediction = 99

def predicthand(thresholded, hand_segment):
    """Classify the current hand gesture with the CNN and, when confident,
    hand the result off to the robot hand over HTTP in a background thread.

    Returns one of "Fist", "Palm", "Swing" or "No identify".

    NOTE(review): the ``thresholded`` and ``hand_segment`` arguments are not
    used; the image is re-read from ./data/Temp.png, which the main loop
    writes just before calling this function — confirm this round-trip
    through disk is intended.
    """
    global lastPrediction
    #{'Fist': 0, 'Palm': 1, 'Swing': 2}
    hand_file = './data/Temp.png'
    hand_file = image.load_img(hand_file, target_size=(89, 100))
    hand_file = image.img_to_array(hand_file)
    hand_file = np.expand_dims(hand_file, axis=0)
    # Scale pixel values to [0, 1], matching the training preprocessing.
    hand_file = hand_file/255
    # NOTE(review): predict_classes() was removed in TF 2.6; presumably this
    # targets an older tensorflow.keras — verify before upgrading.
    prediction_class = new_model.predict_classes(hand_file)
    prediction = new_model.predict(hand_file)
    # {'Fist': 0, 'Palm': 1, 'Swing': 2}
    # Detect the type of gesture of the hand and send this info to the
    # Arduino/ESP8266 accordingly.
    # We don't do anything unless we are pretty sure of our prediction (95%).
    if np.amax(prediction) > 0.95:
        if prediction_class[0] == 0:
            # Send in a background thread so the video loop is not blocked
            # by the HTTP request.
            t = threading.Thread(target=send_info, args=(0,lastPrediction))
            t.start()
            # send_info(0)
            lastPrediction=0
            return "Fist"
        elif prediction_class[0] == 1:
            t = threading.Thread(target=send_info, args=(1,lastPrediction))
            t.start()
            #send_info(1)
            lastPrediction = 1
            return "Palm"
        else:
            # NOTE(review): lastPrediction is not updated for "Swing" and no
            # command is sent — confirm this is deliberate.
            return "Swing"
    else:
        return "No identify"
# Servo initializer: move the hand servo to its rest position at startup.
if ESP8266:
    pload = {"degree1": 0}
    r = requests.post('http://192.168.0.161/moveServo', json=pload)

def send_info(currentPrediction,lastPrediction):
    """Send the new gesture to the robot hand's ESP8266 over HTTP.

    Only posts when the prediction changed since the last send:
    Fist (0) -> 115 degrees, Palm (1) -> 65 degrees.
    """
    if ESP8266:
        if lastPrediction != currentPrediction:
            if currentPrediction==0:
                #pload = {"degree1": 10, "degree2": 170}
                pload = {"degree1": 115}
                r = requests.post('http://192.168.0.161/moveServo', json=pload)
            elif currentPrediction == 1:
                #pload = {"degree1":20,"degree2":100}
                pload = {"degree1":65}
                r = requests.post('http://192.168.0.161/moveServo',json = pload)
                print(r.status_code)
# URL of an IP-camera snapshot endpoint (alternative video source, unused).
url='http://192.168.0.162:8080/shot.jpg'
#cam = cv2.VideoCapture(url)
cam = cv2.VideoCapture(0)

# Intialize a frame count
num_frames = 0

# recording flag. Use True if you need create your own hand dataSet.
start_recording = False
image_num = 0

# keep looping, until interrupted
while True:
    # get the current frame
    #cam = cv2.VideoCapture(url)
    ret, frame = cam.read()

    # flip the frame so that it is not the mirror view
    frame = cv2.flip(frame, 1)

    # clone the frame
    frame_copy = frame.copy()

    # Grab the ROI from the frame
    roi = frame[roi_top:roi_bottom, roi_right:roi_left]

    # Apply grayscale and blur to ROI
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    # For the first 60 frames we will calculate the average of the background.
    # We will tell the user while this is happening
    if num_frames < 60:
        calc_accum_avg(gray, accumulated_weight)
        if num_frames <= 59:
            cv2.putText(frame_copy, "WAIT! GETTING BACKGROUND AVG.", (200, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
            cv2.imshow("Finger Count",frame_copy)
    else:
        # now that we have the background, we can segment the hand.
        # segment the hand region
        hand = segment(gray)

        # First check if we were able to actually detect a hand
        if hand is not None:
            # unpack
            thresholded, hand_segment = hand

            # Draw contours around hand segment
            cv2.drawContours(frame_copy, [hand_segment + (roi_right, roi_top)], -1, (255, 0, 0),1)

            # Save the thresholded image; predicthand() re-reads it from disk.
            cv2.imwrite('./data/Temp.png', thresholded)
            #resizeImage('Temp.png')
            fingers = predicthand(thresholded, hand_segment)

            # Display count
            cv2.putText(frame_copy, str(fingers), (70, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)

            # Also display the thresholded image
            cv2.imshow("Thesholded", thresholded)

            # Dataset-recording mode: save up to 100 thresholded frames,
            # sampling one of every 5 frames.
            if start_recording == True:
                name_file = "Recordings/fist_" + str(image_num) + '.png'
                cv2.putText(frame_copy, "Recording....", (200, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
                cv2.putText(frame_copy, name_file, (200, 450), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 1)
                #cv2.imshow("recoding",frame_copy)
                if image_num<100:
                    # Mention the directory in which you wanna store the images followed by the image name
                    cv2.imwrite(name_file, thresholded)
                    # We are recording every 5 frames
                    if num_frames%5 == 0:
                        image_num = image_num+1
                        print("image_num :",image_num)
                else:
                    start_recording =False
                    cv2.putText(frame_copy, " ", (200, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)

    # Draw ROI Rectangle on frame copy
    cv2.rectangle(frame_copy, (roi_left, roi_top), (roi_right, roi_bottom), (0,0,255), 5)

    # increment the number of frames for tracking
    num_frames += 1

    # Display the frame with segmented hand
    cv2.imshow("Finger Count", frame_copy)

    # Close windows with Esc; press 's' to start dataset recording.
    k = cv2.waitKey(1) & 0xFF

    if k == ord("s"):
        start_recording = True
    if k == 27:
        break

# Release the camera and destroy all the windows
cam.release()
cv2.destroyAllWindows()
|
nvwave.py | #nvwave.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Provides a simple Python interface to playing audio using the Windows multimedia waveOut functions, as well as other useful utilities.
"""
import threading
from ctypes import *
from ctypes.wintypes import *
import winKernel
import wave
import config
__all__ = (
	"WavePlayer", "getOutputDeviceNames", "outputDeviceIDToName", "outputDeviceNameToID",
)

# Handle to the Windows multimedia (winmm.dll) library.
winmm = windll.winmm

HWAVEOUT = HANDLE
LPHWAVEOUT = POINTER(HWAVEOUT)

class WAVEFORMATEX(Structure):
	# ctypes mirror of the Win32 WAVEFORMATEX structure describing an audio
	# format.  Field order and types must match the Windows ABI exactly.
	_fields_ = [
		("wFormatTag", WORD),
		("nChannels", WORD),
		("nSamplesPerSec", DWORD),
		("nAvgBytesPerSec", DWORD),
		("nBlockAlign", WORD),
		("wBitsPerSample", WORD),
		("cbSize", WORD)
	]
LPWAVEFORMATEX = POINTER(WAVEFORMATEX)

class WAVEHDR(Structure):
	# ctypes mirror of the Win32 WAVEHDR audio buffer header.
	# Declared empty first because the struct is self-referential (lpNext).
	pass
LPWAVEHDR = POINTER(WAVEHDR)
WAVEHDR._fields_ = [
	("lpData", LPSTR),
	("dwBufferLength", DWORD),
	("dwBytesRecorded", DWORD),
	("dwUser", DWORD),
	("dwFlags", DWORD),
	("dwLoops", DWORD),
	("lpNext", LPWAVEHDR),
	("reserved", DWORD)
]
# dwFlags bit set by the driver when the buffer has finished playing.
WHDR_DONE = 1

WAVE_FORMAT_PCM = 1
WAVE_MAPPER = -1
MMSYSERR_NOERROR = 0

CALLBACK_NULL = 0
#CALLBACK_FUNCTION = 0x30000
# We use an event callback: winmm signals an event handle on buffer completion.
CALLBACK_EVENT = 0x50000
#waveOutProc = CFUNCTYPE(HANDLE, UINT, DWORD, DWORD, DWORD)
#WOM_DONE = 0x3bd

MAXPNAMELEN = 32
class WAVEOUTCAPS(Structure):
	# ctypes mirror of the Win32 WAVEOUTCAPSW device-capabilities structure.
	_fields_ = [
		('wMid', WORD),
		('wPid', WORD),
		('vDriverVersion', c_uint),
		('szPname', WCHAR*MAXPNAMELEN),
		('dwFormats', DWORD),
		('wChannels', WORD),
		('wReserved1', WORD),
		('dwSupport', DWORD),
	]

# Set argument types.
winmm.waveOutOpen.argtypes = (LPHWAVEOUT, UINT, LPWAVEFORMATEX, DWORD, DWORD, DWORD)

# Initialize error checking: convert MMSYSERR return codes into WindowsError
# exceptions carrying the winmm error text.
def _winmm_errcheck(res, func, args):
	if res != MMSYSERR_NOERROR:
		buf = create_unicode_buffer(256)
		winmm.waveOutGetErrorTextW(res, buf, sizeof(buf))
		raise WindowsError(res, buf.value)
for func in (
	winmm.waveOutOpen, winmm.waveOutPrepareHeader, winmm.waveOutWrite, winmm.waveOutUnprepareHeader,
	winmm.waveOutPause, winmm.waveOutRestart, winmm.waveOutReset, winmm.waveOutClose,
	winmm.waveOutGetDevCapsW
):
	func.errcheck = _winmm_errcheck
class WavePlayer(object):
	"""Synchronously play a stream of audio.
	To use, construct an instance and feed it waveform audio using L{feed}.
	"""
	#: A lock to prevent WaveOut* functions from being called simultaneously, as this can cause problems even if they are for different HWAVEOUTs.
	_global_waveout_lock = threading.RLock()

	def __init__(self, channels, samplesPerSec, bitsPerSample, outputDevice=WAVE_MAPPER, closeWhenIdle=True):
		"""Constructor.
		@param channels: The number of channels of audio; e.g. 2 for stereo, 1 for mono.
		@type channels: int
		@param samplesPerSec: Samples per second (hz).
		@type samplesPerSec: int
		@param bitsPerSample: The number of bits per sample.
		@type bitsPerSample: int
		@param outputDevice: The device ID or name of the audio output device to use.
		@type outputDevice: int or basestring
		@param closeWhenIdle: If C{True}, close the output device when no audio is being played.
		@type closeWhenIdle: bool
		@note: If C{outputDevice} is a name and no such device exists, the default device will be used.
		@raise WindowsError: If there was an error opening the audio output device.
		"""
		self.channels=channels
		self.samplesPerSec=samplesPerSec
		self.bitsPerSample=bitsPerSample
		if isinstance(outputDevice, basestring):
			outputDevice = outputDeviceNameToID(outputDevice, True)
		self.outputDeviceID = outputDevice
		#: If C{True}, close the output device when no audio is being played.
		#: @type: bool
		self.closeWhenIdle = closeWhenIdle
		self._waveout = None
		# Auto-reset event signalled by winmm whenever a buffer finishes
		# playing (see CALLBACK_EVENT in open()).
		self._waveout_event = winKernel.kernel32.CreateEventW(None, False, False, None)
		# Guards self._waveout; _lock serialises feed/sync/close sequences.
		self._waveout_lock = threading.RLock()
		self._lock = threading.RLock()
		self.open()

	def open(self):
		"""Open the output device.
		This will be called automatically when required.
		It is not an error if the output device is already open.
		"""
		with self._waveout_lock:
			if self._waveout:
				return
			wfx = WAVEFORMATEX()
			wfx.wFormatTag = WAVE_FORMAT_PCM
			wfx.nChannels = self.channels
			wfx.nSamplesPerSec = self.samplesPerSec
			wfx.wBitsPerSample = self.bitsPerSample
			# Bytes per sample frame across all channels.
			wfx.nBlockAlign = self.bitsPerSample / 8 * self.channels
			wfx.nAvgBytesPerSec = self.samplesPerSec * wfx.nBlockAlign
			waveout = HWAVEOUT(0)
			with self._global_waveout_lock:
				winmm.waveOutOpen(byref(waveout), self.outputDeviceID, LPWAVEFORMATEX(wfx), self._waveout_event, 0, CALLBACK_EVENT)
			self._waveout = waveout.value
			self._prev_whdr = None

	def feed(self, data):
		"""Feed a chunk of audio data to be played.
		This is normally synchronous.
		However, synchronisation occurs on the previous chunk, rather than the current chunk; i.e. calling this while no audio is playing will begin playing the chunk but return immediately.
		This allows for uninterrupted playback as long as a new chunk is fed before the previous chunk has finished playing.
		@param data: Waveform audio in the format specified when this instance was constructed.
		@type data: str
		@raise WindowsError: If there was an error playing the audio.
		"""
		whdr = WAVEHDR()
		whdr.lpData = data
		whdr.dwBufferLength = len(data)
		with self._lock:
			with self._waveout_lock:
				self.open()
				with self._global_waveout_lock:
					winmm.waveOutPrepareHeader(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR))
				try:
					with self._global_waveout_lock:
						winmm.waveOutWrite(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR))
				except WindowsError, e:
					self.close()
					raise e
			# Wait for the *previous* chunk, not this one, so playback of
			# consecutive chunks overlaps seamlessly.
			self.sync()
			self._prev_whdr = whdr

	def sync(self):
		"""Synchronise with playback.
		This method blocks until the previously fed chunk of audio has finished playing.
		It is called automatically by L{feed}, so usually need not be called directly by the user.
		"""
		with self._lock:
			if not self._prev_whdr:
				return
			assert self._waveout, "waveOut None before wait"
			# The driver sets WHDR_DONE in dwFlags when the buffer completes;
			# the event wakes us so we can re-check.
			while not (self._prev_whdr.dwFlags & WHDR_DONE):
				winKernel.waitForSingleObject(self._waveout_event, winKernel.INFINITE)
			with self._waveout_lock:
				assert self._waveout, "waveOut None after wait"
				with self._global_waveout_lock:
					winmm.waveOutUnprepareHeader(self._waveout, LPWAVEHDR(self._prev_whdr), sizeof(WAVEHDR))
			self._prev_whdr = None

	def pause(self, switch):
		"""Pause or unpause playback.
		@param switch: C{True} to pause playback, C{False} to unpause.
		@type switch: bool
		"""
		with self._waveout_lock:
			if not self._waveout:
				return
			if switch:
				with self._global_waveout_lock:
					winmm.waveOutPause(self._waveout)
			else:
				with self._global_waveout_lock:
					winmm.waveOutRestart(self._waveout)

	def idle(self):
		"""Indicate that this player is now idle; i.e. the current continuous segment  of audio is complete.
		This will first call L{sync} to synchronise with playback.
		If L{closeWhenIdle} is C{True}, the output device will be closed.
		A subsequent call to L{feed} will reopen it.
		"""
		with self._lock:
			self.sync()
			with self._waveout_lock:
				if not self._waveout:
					return
				if self.closeWhenIdle:
					self._close()

	def stop(self):
		"""Stop playback.
		"""
		with self._waveout_lock:
			if not self._waveout:
				return
			try:
				with self._global_waveout_lock:
					# Pausing first seems to make waveOutReset respond faster on some systems.
					winmm.waveOutPause(self._waveout)
					winmm.waveOutReset(self._waveout)
			except WindowsError:
				# waveOutReset seems to fail randomly on some systems.
				pass
		# Unprepare the previous buffer and close the output device if appropriate.
		self.idle()

	def close(self):
		"""Close the output device.
		"""
		self.stop()
		with self._lock:
			with self._waveout_lock:
				if not self._waveout:
					return
				self._close()

	def _close(self):
		# Internal: caller must hold _waveout_lock (and have synced playback).
		with self._global_waveout_lock:
			winmm.waveOutClose(self._waveout)
		self._waveout = None

	def __del__(self):
		self.close()
		# Release the kernel event handle created in __init__.
		winKernel.kernel32.CloseHandle(self._waveout_event)
		self._waveout_event = None
def _getOutputDevices():
	# Generator yielding (deviceID, name) for every waveOut device.
	# The range starts at -1 to include WAVE_MAPPER (the default device).
	caps = WAVEOUTCAPS()
	for devID in xrange(-1, winmm.waveOutGetNumDevs()):
		try:
			winmm.waveOutGetDevCapsW(devID, byref(caps), sizeof(caps))
			yield devID, caps.szPname
		except WindowsError:
			# It seems that in certain cases, Windows includes devices which cannot be accessed.
			pass
def getOutputDeviceNames():
	"""Obtain the names of all audio output devices on the system.
	@return: The names of all output devices on the system.
	@rtype: [str, ...]
	"""
	names = []
	for deviceID, deviceName in _getOutputDevices():
		names.append(deviceName)
	return names
def outputDeviceIDToName(ID):
	"""Obtain the name of an output device given its device ID.
	@param ID: The device ID.
	@type ID: int
	@return: The device name.
	@rtype: str
	@raise LookupError: If there is no device with this ID.
	"""
	caps = WAVEOUTCAPS()
	try:
		winmm.waveOutGetDevCapsW(ID, byref(caps), sizeof(caps))
	except WindowsError:
		raise LookupError("No such device ID")
	return caps.szPname
def outputDeviceNameToID(name, useDefaultIfInvalid=False):
	"""Obtain the device ID of an output device given its name.
	@param name: The device name.
	@type name: str
	@param useDefaultIfInvalid: C{True} to use the default device (wave mapper) if there is no such device,
		C{False} to raise an exception.
	@return: The device ID.
	@rtype: int
	@raise LookupError: If there is no such device and C{useDefaultIfInvalid} is C{False}.
	"""
	for deviceID, deviceName in _getOutputDevices():
		if deviceName == name:
			return deviceID
	# No device with this name was found.
	if useDefaultIfInvalid:
		return WAVE_MAPPER
	raise LookupError("No such device name")
# Module-level player/thread reused across calls so that playing a new file
# interrupts the previous one.
fileWavePlayer = None
fileWavePlayerThread=None
def playWaveFile(fileName, async=True):
	"""plays a specified wave file.
	@param fileName: the path of the wave file to play.
	@param async: if C{True}, play in the background and return immediately;
		otherwise block until playback finishes.
	NOTE(review): 'async' became a reserved word in Python 3.7; this
	parameter must be renamed (e.g. 'asynchronous') before any Python 3 port.
	"""
	global fileWavePlayer, fileWavePlayerThread
	f = wave.open(fileName,"r")
	if f is None: raise RuntimeError("can not open file %s"%fileName)
	if fileWavePlayer is not None:
		# Interrupt any previous file still playing.
		fileWavePlayer.stop()
	fileWavePlayer = WavePlayer(channels=f.getnchannels(), samplesPerSec=f.getframerate(),bitsPerSample=f.getsampwidth()*8, outputDevice=config.conf["speech"]["outputDevice"])
	fileWavePlayer.feed(f.readframes(f.getnframes()))
	if async:
		if fileWavePlayerThread is not None:
			fileWavePlayerThread.join()
		# idle() blocks until playback completes, so run it on a thread.
		fileWavePlayerThread=threading.Thread(target=fileWavePlayer.idle)
		fileWavePlayerThread.start()
	else:
		fileWavePlayer.idle()
|
look_at_closest_person.py | #!/usr/bin/env python
# The look-at-closest-person action in a task plan
import numpy as np
from threading import Thread
import rospy
from task_executor.abstract_step import AbstractStep
from rail_people_detection_msgs.msg import Person, DetectionContext
from .look import LookAction
class LookAtClosestPersonAction(AbstractStep):
    """Background behaviour that keeps the robot's head pointed at the
    closest detected person (or a specific person by ID)."""

    # Topic publishing the closest detected person.
    CLOSEST_PERSON_TOPIC = "/rail_people_detector/closest_person"
    # Minimum positional change (metres) before a new look command is sent,
    # to avoid erratic head motion.
    POSITION_CHANGE_HEAD_FOLLOW_THRESHOLD = 0.02
    # Duration (seconds) passed to the underlying head/look action.
    HEAD_ACTION_DURATION = 0.1

    def init(self, name):
        self.name = name

        # Is this background behaviour enabled or is it disabled?
        self.enabled = False

        # Variable to keep track of the person that we're looking at
        self._closest_person = None
        self._desired_person_id = None
        self._last_look_pose = None  # Don't want erratic look behaviour
        self._closest_person_sub = rospy.Subscriber(
            LookAtClosestPersonAction.CLOSEST_PERSON_TOPIC,
            Person,
            self._on_closest_person
        )

        # The background thread to do the looking. Don't run it unless we need
        # to. At that point spawn and start the thread
        self._look_thread = None

        # The look action
        self._look_action = LookAction()

        # Initialize the sub action
        self._look_action.init('look_look_at_closest_person')
        self._look_action._duration = LookAtClosestPersonAction.HEAD_ACTION_DURATION

    def run(self, enable, person_id=""):
        """Enable or disable the behaviour (generator-style step).

        An empty person_id means "track whoever is closest".
        """
        rospy.loginfo("Action {}: {}".format(
            self.name,
            "Enable{}".format(
                ". Looking at {}".format(person_id) if person_id else ""
            ) if enable else "Disable"
        ))

        # Reset the variables if this is a new person
        if self._desired_person_id != person_id:
            if self._look_thread is not None:
                # Stop the old thread cleanly before retargeting.
                self.enabled = False
                self._look_thread.join()
                self._look_thread = None
            self._closest_person = None
            self._last_look_pose = None

        # Set the variables
        self._desired_person_id = person_id
        self.enabled = enable
        if self.enabled and self._look_thread is None:
            self._look_thread = Thread(target=self._lookat_person)
            self._look_thread.start()

        # There's no failure here
        yield self.set_succeeded()

    def stop(self):
        self.enabled = False
        self._look_action.stop()

    def _on_closest_person(self, msg):
        # Subscriber callback for the closest-person topic.
        # If we're disabled, then there's nothing to do here
        if not self.enabled:
            return

        # If this person is the one we're meant to be tracking or if we're
        # meant to track anyone, then add this person
        if not self._desired_person_id or msg.id == self._desired_person_id:
            self._closest_person = msg

    def _lookat_person(self):
        # Background loop: re-point the head whenever the tracked person has
        # moved more than the follow threshold.
        # NOTE(review): when no person is detected this loop spins without
        # sleeping (bare `continue`), pegging a CPU core — confirm whether a
        # rospy.Rate/sleep was intended here.
        while not rospy.is_shutdown() and self.enabled:
            # Don't bother doing anything if we are disabled
            if self._closest_person is None:
                continue

            # Check if the closest person's look location is within bounds. If
            # not send a look command
            if self._last_look_pose is None or np.sqrt(
                (self._closest_person.pose.position.x - self._last_look_pose.position.x) ** 2
                + (self._closest_person.pose.position.y - self._last_look_pose.position.y) ** 2
                + (self._closest_person.pose.position.z - self._last_look_pose.position.z) ** 2
            ) >= LookAtClosestPersonAction.POSITION_CHANGE_HEAD_FOLLOW_THRESHOLD:
                self._last_look_pose = self._closest_person.pose
                self._look_action(pose={
                    'x': self._last_look_pose.position.x,
                    'y': self._last_look_pose.position.y,
                    'z': self._last_look_pose.position.z,
                    'frame': self._closest_person.header.frame_id,
                })
|
main.py |
import threading
from slackbot.bot import Bot
from app.rtm.emoji_fetcher import connect
import app
def main():
    """Launch the RTM emoji listener in the background, then run the bot."""
    # The RTM connection loop gets its own thread so it does not block
    # the slackbot event loop started below.
    listener = threading.Thread(target=connect)
    listener.start()
    Bot().run()


if __name__ == "__main__":
    main()
|
subscriptions.py | import gc
import logging
import threading
import ujson
import websocket
import time
import random
from typing import List, Optional, Dict, Union
from queue import Queue
from tinkoff_invest.config import EVENTS_PROCESSING_WORKERS_COUNT
from tinkoff_invest.base_strategy import BaseStrategy
from tinkoff_invest.models.candle import Candle
from tinkoff_invest.models.instrument_status import InstrumentStatus
from tinkoff_invest.models.types import SubscriptionInterval, SubscriptionEventType
from tinkoff_invest.models.order_book import OrderBook
_SUBSCRIPTION_RETRIES_COUNT = 15
_SUBSCRIPTION_TIMEOUT_SEC = 60
def _build_subscription_name(figi: str, obj_type: str, param: str) -> str:
return "{}_{}_{}".format(figi, obj_type, param)
def _parse_subscription_name(name: str) -> (str, str, str):
return name.split('_')
class SubscriptionManager:
    """Manages websocket market-data subscriptions (candles, order books,
    instrument info) and dispatches incoming events to strategy callbacks
    through a pool of daemon worker threads.

    The websocket connection and the workers are created lazily on the
    first ``_subscribe`` call; on connection loss the manager retries with
    exponential backoff and resubscribes everything it knew about.
    """

    # Upper bound on reconnect attempts after the websocket drops.
    MAX_RECONNECT_ATTEMPTS = 5

    def __init__(self, server: str, token: str):
        """Store connection parameters; no network activity happens here."""
        self._ws_server: str = server
        self._token: str = token
        # subscription name -> list of {'argument': request dict, 'strategy': BaseStrategy}
        self._subscriptions: Dict[str, List[Dict[str, Union[Dict, BaseStrategy]]]] = {}
        self._web_socket: Optional[websocket.WebSocketApp] = None
        self._workers: List[threading.Thread] = []
        # Raw event strings queued by the websocket callback for the workers.
        self._queue: Queue = Queue()
        self._connection_established: bool = False
        self._stop_flag: bool = False
        self._shall_reconnect: bool = False
        self._reconnect_retries: int = 0

    def __del__(self):
        # Best-effort teardown: stop workers and close the socket if open.
        self._deinitialize_workers()
        if self._connection_established:
            self._web_socket.close()
            self._connection_established = False

    def _initialize_workers(self) -> None:
        """Start the daemon threads that drain the event queue."""
        for i in range(EVENTS_PROCESSING_WORKERS_COUNT):
            thread = threading.Thread(target=self._worker, daemon=True)
            thread.start()
            self._workers.append(thread)
        logging.info("%d workers are ready to process events", EVENTS_PROCESSING_WORKERS_COUNT)

    def _deinitialize_workers(self) -> None:
        """Signal workers to stop and join them with a short timeout.

        NOTE(review): workers blocked in ``Queue.get()`` only observe
        ``_stop_flag`` after the next event arrives, so the 1s join may
        time out; threads are daemonic, so this is tolerated.
        """
        if not self._workers:
            return
        logging.info("Shutdown subscription workers")
        self._stop_flag = True
        for worker in self._workers:
            try:
                if worker.is_alive():
                    worker.join(1)
            except Exception as err:
                logging.error("Unable to join tread, {}".format(err))
        self._workers: List[threading.Thread] = []

    def _ws_connect(self) -> None:
        """Run the websocket client loop (executed on a dedicated thread).

        ``run_forever`` blocks until the connection drops; afterwards the
        inner loop retries with truncated exponential backoff up to
        ``MAX_RECONNECT_ATTEMPTS`` times while ``_shall_reconnect`` is set.
        """
        while True:
            self._web_socket = websocket.WebSocketApp(self._ws_server, ["Authorization: Bearer " + self._token],
                                                      on_message=self._on_subscription_event, on_error=self._on_error,
                                                      on_close=self._on_close, on_open=self._on_open)
            logging.info("Starting WebSocketApp connection")
            self._web_socket.run_forever()
            while self._shall_reconnect and self._reconnect_retries < self.MAX_RECONNECT_ATTEMPTS:
                gc.collect()
                logging.info("Connection lost, trying to reconnect...")
                # sleep between retries grows like truncated binary exponential backoff, capped at 32 seconds
                sleep_time = 2 ** self._reconnect_retries + random.uniform(0, 1) if self._reconnect_retries < 6 else 32
                logging.info(f"...sleeping {sleep_time:.2f} seconds before retry...")
                time.sleep(sleep_time)
                self._reconnect_retries += 1
                self._web_socket.keep_running = True
                self._web_socket.run_forever()
            self._shall_reconnect = False
            if self._web_socket:
                self._web_socket.close()
                self._web_socket = None

    def _initialize_web_sockets(self) -> None:
        """Start the websocket thread and block until the connection opens."""
        self._ws_thread: threading.Thread = threading.Thread(target=self._ws_connect, daemon=True)
        self._ws_thread.start()
        # Busy-poll (10ms) until _on_open flips _connection_established.
        while True:
            if self._connection_established:
                break
            time.sleep(0.01)
        logging.info("Web socket client started")

    def _on_open(self, _) -> None:
        """Websocket callback: connection is up; resubscribe if this was a reconnect."""
        self._connection_established = True
        self._reconnect_retries = 0
        logging.info("Web socket connection opened")
        if self._shall_reconnect:
            self._restart_workers_and_resubscribe()

    def _on_error(self, _, error: Exception) -> None:
        """Websocket callback: log the error and arm the reconnect logic."""
        logging.exception(error)
        self._deinitialize_workers()
        self._connection_established = False
        self._shall_reconnect = True

    def _on_close(self, _1, _2, _3) -> None:
        """Websocket callback: mark the connection as down."""
        logging.warning("Web socket has been closed")
        self._deinitialize_workers()
        self._connection_established = False

    def _worker(self) -> None:
        """Worker-thread loop: pop raw events and dispatch them to strategies.

        Each JSON event is routed by its ``event`` field to the matching
        subscription's strategy callback; any exception is funneled into
        ``_on_error`` and the loop continues.
        """
        while True:
            try:
                if self._stop_flag:
                    logging.info("Shutdown worker")
                    break
                if self._queue.qsize() > EVENTS_PROCESSING_WORKERS_COUNT - 1:
                    logging.warning("Too many events to process: {}".format(self._queue.qsize()))
                event = self._queue.get()
                obj = ujson.loads(event)
                if obj["event"] == SubscriptionEventType.CANDLE.value:
                    candle = Candle(obj["payload"])
                    name = _build_subscription_name(obj["payload"]["figi"], obj["event"], obj["payload"]["interval"])
                    for subscription in self._subscriptions[name]:
                        subscription['strategy'].on_candle(candle)
                elif obj["event"] == SubscriptionEventType.ORDER_BOOK.value:
                    order_book = OrderBook(obj["payload"])
                    name = _build_subscription_name(obj["payload"]["figi"], obj["event"], obj["payload"]["depth"])
                    for subscription in self._subscriptions[name]:
                        subscription['strategy'].on_order_book(order_book)
                elif obj["event"] == SubscriptionEventType.INSTRUMENT.value:
                    info = InstrumentStatus(obj["payload"])
                    name = _build_subscription_name(obj["payload"]["figi"], obj["event"], "")
                    for subscription in self._subscriptions[name]:
                        subscription['strategy'].on_instrument_info(info)
                else:
                    raise Exception("An unsupported event type '{}'".format(obj["event"]))
                logging.debug("Event has been processed: %s", event)
                self._queue.task_done()
            except Exception as err:
                self._on_error(self._ws_server, err)

    def _on_subscription_event(self, _, event: str) -> None:
        """Websocket callback: enqueue the raw event for the worker pool."""
        logging.debug("New event: %s", event)
        if not self._stop_flag:
            self._queue.put(event)

    def _subscribe(self, argument: dict, subscription_name: str, strategy: BaseStrategy) -> None:
        """Send a subscribe request and register the strategy under the key.

        Lazily brings up the websocket connection and the worker pool on
        the first subscription.
        """
        if not self._web_socket:
            self._initialize_web_sockets()
            self._initialize_workers()
        self._web_socket.send(ujson.dumps(argument))
        if subscription_name not in self._subscriptions:
            self._subscriptions[subscription_name] = [{'argument': argument,
                                                       'strategy': strategy}]
        else:
            self._subscriptions[subscription_name].append({'argument': argument,
                                                           'strategy': strategy})

    def _restart_workers_and_resubscribe(self):
        """After a reconnect: restart the worker pool and replay every
        previously-sent subscription request."""
        self._shall_reconnect = False
        self._stop_flag = False
        self._initialize_workers()
        for subscription_name, strategies in self._subscriptions.items():
            logging.info(f"Resubscribing to subscription: {subscription_name}")
            for strategy_details in strategies:
                logging.info(f"...using strategy {strategy_details['strategy']} with {strategy_details['argument']}")
                self._web_socket.send(ujson.dumps(strategy_details['argument']))

    def _unsubscribe(self, argument: dict, subscription_name: str) -> None:
        """Send an unsubscribe request and drop the registration when it
        was the only strategy on this key."""
        self._web_socket.send(ujson.dumps(argument))
        if len(self._subscriptions[subscription_name]) == 1:
            del self._subscriptions[subscription_name]
        else:
            pass # TODO: how to remove specific strategy

    def subscribe_to_candles(self, figi: str, interval: SubscriptionInterval, strategy: BaseStrategy) -> None:
        """Subscribe *strategy* to candle events for *figi* at *interval*."""
        subscription_name = _build_subscription_name(figi, "candle", interval.value)
        self._subscribe({"event": "candle:subscribe", "figi": figi, "interval": interval.value},
                        subscription_name, strategy)
        logging.info("Candle subscription created (%s, %s)", figi, interval.value)

    def unsubscribe_from_candles(self, figi: str, interval: SubscriptionInterval) -> None:
        """Remove the candle subscription for *figi* at *interval*."""
        subscription_name = _build_subscription_name(figi, "candle", interval.value)
        self._unsubscribe({"event": "candle:unsubscribe", "figi": figi, "interval": interval.value}, subscription_name)
        logging.info("Candle subscription removed (%s, %s)", figi, interval.value)

    def subscribe_to_order_book(self, figi: str, depth: int, strategy: BaseStrategy) -> None:
        """Subscribe *strategy* to order-book events for *figi* (1 <= depth <= 20)."""
        assert (0 < depth <= 20), "Depth should be > 0 and <= 20"
        subscription_name = _build_subscription_name(figi, "orderbook", str(depth))
        self._subscribe({"event": "orderbook:subscribe", "figi": figi, "depth": depth}, subscription_name, strategy)
        logging.info("OrderBook subscription created (%s, %s)", figi, str(depth))

    def unsubscribe_from_order_book(self, figi: str, depth: int) -> None:
        """Remove the order-book subscription for *figi* at *depth*."""
        subscription_name = _build_subscription_name(figi, "orderbook", str(depth))
        self._unsubscribe({"event": "orderbook:unsubscribe", "figi": figi, "depth": depth}, subscription_name)
        logging.info("OrderBook subscription removed (%s, %s)", figi, str(depth))

    def subscribe_to_instrument_info(self, figi: str, strategy: BaseStrategy) -> None:
        """Subscribe *strategy* to instrument-info events for *figi*."""
        subscription_name = _build_subscription_name(figi, "instrument_info", "")
        self._subscribe({"event": "instrument_info:subscribe", "figi": figi}, subscription_name, strategy)
        logging.info("InstrumentInfo subscription created (%s)", figi)

    def unsubscribe_from_instrument_info(self, figi: str) -> None:
        """Remove the instrument-info subscription for *figi*."""
        subscription_name = _build_subscription_name(figi, "instrument_info", "")
        self._unsubscribe({"event": "instrument_info:unsubscribe", "figi": figi}, subscription_name)
        logging.info("InstrumentInfo subscription removed (%s)", figi)
|
SpreadingServer.py | #!/usr/bin/env python3
import socket
import threading
import time
from message import Message
import TorzelaUtils as TU
class SpreadingServer:
   """Torzela spreading server: receives onion-routed messages from the
   previous (middle) server, strips one encryption layer, shuffles the
   round's messages, and broadcasts them to the dead-drop servers; on the
   way back it re-encrypts responses, unshuffles them, and returns them to
   the previous server.
   """

   # nextServers is an array of tuples in the form
   # (<IP>, <Port>)
   # where <IP> is the IP address of a Dead Drop and
   # <Port> is the port that the Dead Drop is listening on
   def __init__(self, nextServers, localPort):
      self.nextServers = nextServers
      self.localPort = localPort
      # We only allow one connect to the SpreadingServer
      # Initialize these to 0 here, we will set them
      # later when we get the initial connection
      self.previousServerIP = 0
      self.previousServerPort = 0
      # Used for onion routing in the conversational protocol
      # The keys and messages will be updated each round
      self.clientLocalKeys = []
      self.clientMessages = []
      self.nMessages = 0
      # The server keys
      self.__privateKey, self.publicKey = TU.generateKeys(
            TU.createKeyGenerator() )
      # We need to wait for all connections to setup, so create
      # an integer and initialize it with the number of dead drops
      # we are connecting to. Every time we successfully connect to
      # one, decrement this value. When it is equal to 0, we know
      # all of the connections are good
      self.allConnectionsGood = len(nextServers)
      for ddServer in nextServers:
         # We need to spawn off a thread here, else we will block
         # the entire program.
         threading.Thread(target=self.setupConnection, args=(ddServer,)).start()
      # Setup main listening socket to accept incoming connections
      threading.Thread(target=self.listen, args=()).start()

   def getPublicKey(self):
      """Return this server's public key (used by clients to build onions)."""
      return self.publicKey

   def setupConnection(self, ddServer):
      """Retry connecting to one dead drop until it accepts our setup message."""
      # Before we can connect to the next server, we need
      # to send a setup message to the next server
      setupMsg = Message()
      setupMsg.setType(0)
      setupMsg.setPayload("{}".format(self.localPort))
      connectionMade = False
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      while not connectionMade:
         try:
            sock.connect(ddServer)
            sock.sendall(str.encode(str(setupMsg)))
            connectionMade = True
            # When self.allConnectionsGood is 0, we know all of
            # the connections have been setup properly
            self.allConnectionsGood -= 1
         except:
            # NOTE(review): bare except retries on ANY error (incl. EISCONN
            # on socket reuse) — presumably only connection refusal is meant
            # Put a delay here so we don't burn CPU time
            time.sleep(1)
      sock.close()

   # This is where all incoming messages are handled
   def listen(self):
      """Accept loop: one handler thread per incoming connection."""
      # Wait until we have connected to the next server
      while self.allConnectionsGood != 0:
         time.sleep(1)
      # Listen for incoming connections
      self.listenSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      self.listenSock.bind(('localhost', self.localPort))
      self.listenSock.listen(10)
      while True:
         print("SpreadingServer awaiting connection")
         conn, client_addr = self.listenSock.accept()
         print("SpreadingServer accepted connection from " + str(client_addr))
         # Spawn a thread to handle the client
         threading.Thread(target=self.handleMsg, args=(conn, client_addr,)).start()

   # This runs in a thread and handles messages from clients
   def handleMsg(self, conn, client_addr):
      """Dispatch one incoming message by its netInfo field:
      0 = connection setup, 1 = forward (client->dead drop),
      2 = response (dead drop->client), 3 = dialing protocol,
      4 = round-start announcement with the expected message count.
      """
      # Receive data from client
      clientData = conn.recv(32768).decode("utf-8")
      # Format as message
      clientMsg = Message()
      clientMsg.loadFromString(clientData)
      if clientMsg.getNetInfo() != 1 and clientMsg.getNetInfo() != 2:
         print("Spreading Server got " + clientData)
      # Check if the packet is for setting up a connection
      if clientMsg.getNetInfo() == 0:
         # If it is, record its IP and Port
         self.previousServerIP = client_addr[0]
         self.previousServerPort = int(clientMsg.getPayload())
         conn.close()
      elif clientMsg.getNetInfo() == 1:
         print("Spreading Server received message from Middle server")
         # In here, we handle messages going from a client towards a dead drop
         # Send message to all dead drops
         # TODO -> Add lock to this whole part
         if self.nMessages <= len(self.clientMessages):
            print("Spreading server error: received more messages than expected")
         # Decrypt one layer of the onion message
         deadDropServer, clientLocalKey, newPayload = TU.decryptOnionLayer(
               self.__privateKey, clientMsg.getPayload(), serverType=1)
         clientMsg.setPayload(newPayload)
         # TODO (jose): deadDropServer contains towards which server
         # the message has to be sent, manage that
         # Save the message data
         self.clientLocalKeys.append(clientLocalKey)
         self.clientMessages.append(clientMsg)
         if self.nMessages == len(self.clientMessages):
            self.forwardMessages()
      elif clientMsg.getNetInfo() == 2:
         print("Spreading Server received message from Dead Drop server")
         # Here we handle messages coming from a dead drop back
         # towards a client. Just forward back to server
         if self.nMessages <= len(self.clientMessages):
            print("Middle server error: received more messages than expected")
         # Encrypt one layer of the onion message
         # (keys were shuffled alongside messages in forwardMessages, so
         # index len(clientMessages) matches the next expected response)
         clientLocalKey = self.clientLocalKeys[ len(self.clientMessages) ]
         newPayload = TU.encryptOnionLayer(self.__privateKey,
                                           clientLocalKey,
                                           clientMsg.getPayload())
         clientMsg.setPayload(newPayload)
         self.clientMessages.append(clientMsg)
         if self.nMessages == len(self.clientMessages):
            self.forwardResponses()
      elif clientMsg.getNetInfo() == 3:
         # Dialing Protocol: Client -> DeadDrop
         # Onion routing stuff
         deadDropServer, self.clientLocalKey, newPayload = TU.decryptOnionLayer(
               self.__privateKey, clientMsg.getPayload(), serverType=1)
         clientMsg.setPayload(newPayload)
         # TODO (matthew): deadDropServer contains towards which server
         # the message has to be sent :D
         for ddrop in self.nextServers:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect(ddrop)
            self.sock.sendall(str(clientMsg).encode("utf-8"))
            self.sock.close()
      elif clientMsg.getNetInfo() == 4:
         # In here, we handle the first message sent by the previous server.
         # It notifies us of a new round and how many messages are coming
         # TODO -> Add lock to this whole part
         self.nMessages = int(clientMsg.getPayload())
         self.clientMessages = []
         self.clientLocalKeys = []

   # Assuming that the messages are stored in self.clientMessages this method
   # adds noise, shuffles the messages and forwards them to the next server
   def forwardMessages(self):
      """Shuffle the round's messages (mixnet step) and broadcast them,
      preceded by a netInfo=4 count announcement, to every dead drop."""
      # TODO (jose): Noise addition goes here
      # Apply the mixnet by shuffling the messages
      self.permutation = TU.generatePermutation(self.nMessages)
      shuffledMessages = TU.shuffleWithPermutation(self.clientMessages,
                                                   self.permutation)
      # Also shuffle the keys so they still match the clientMessages:
      # self.clientLocalKeys[ i ] is the key that unlocks message self.clientMessges[ i ]
      # This is used afterwards in handleMessage, getNetInfo() == 2
      self.clientLocalKeys = TU.shuffleWithPermutation(self.clientLocalKeys,
                                                        self.permutation)
      # Forward all the messages to the next server
      # Send a message to the next server notifying of the numbers of
      # messages that will be sent
      firstMsg = Message()
      firstMsg.setNetInfo(4)
      firstMsg.setPayload("{}".format(self.nMessages))
      # TODO send it only to the correct dds and the correct number of messages
      for ddrop in self.nextServers:
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         sock.connect(ddrop)
         sock.sendall(str(firstMsg).encode("utf-8"))
         sock.close()
      # Send all the messages to the next server
      # TODO send it only to the correct dds
      for msg in shuffledMessages:
         for ddrop in self.nextServers:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(ddrop)
            sock.sendall(str(msg).encode("utf-8"))
            sock.close()
      # Restart the messages so that we receive the responses from the
      # next server
      self.clientMessages = []

   def forwardResponses(self):
      """Undo the round's permutation and return each response to the
      previous server over a fresh connection."""
      # Unshuffle the messages
      self.clientMessages = TU.unshuffleWithPermutation(self.clientMessages,
                                                         self.permutation)
      # Send the responses back to the previous server
      for msg in self.clientMessages:
         tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         tempSock.connect((self.previousServerIP, self.previousServerPort))
         tempSock.sendall(str(msg).encode("utf-8"))
         tempSock.close()
|
test_worker.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
create_file, create_file_after_timeout, div_by_zero, do_nothing, say_hello,
say_pid, run_dummy_heroku_worker, access_self, modify_self,
modify_self_and_error, long_running_job, save_key_ttl
)
from rq import (get_failed_queue, Queue, SimpleWorker, Worker,
get_current_connection)
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus
from rq.registry import StartedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.worker import HerokuWorker, WorkerStatus
class CustomJob(Job):
    """Job subclass used by tests exercising Worker's ``job_class`` hook."""
    pass
class CustomQueue(Queue):
    """Queue subclass used by tests exercising Worker's ``queue_class`` hook."""
    pass
class TestWorker(RQTestCase):
    def test_create_worker(self):
        """Worker creation using various inputs.

        A Worker must accept queue names (str, and bytes on Python 2),
        Queue objects, and lists/iterables of either, normalising them
        all to Queue objects in order.
        """
        # With single string argument
        w = Worker('foo')
        self.assertEqual(w.queues[0].name, 'foo')

        # With list of strings
        w = Worker(['foo', 'bar'])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')

        # With iterable of strings
        w = Worker(iter(['foo', 'bar']))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')

        # Also accept byte strings in Python 2
        if PY2:
            # With single byte string argument
            w = Worker(b'foo')
            self.assertEqual(w.queues[0].name, 'foo')

            # With list of byte strings
            w = Worker([b'foo', b'bar'])
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')

            # With iterable of byte strings
            w = Worker(iter([b'foo', b'bar']))
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')

        # With single Queue
        w = Worker(Queue('foo'))
        self.assertEqual(w.queues[0].name, 'foo')

        # With iterable of Queues
        w = Worker(iter([Queue('foo'), Queue('bar')]))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')

        # With list of Queues
        w = Worker([Queue('foo'), Queue('bar')])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
    def test_work_and_quit(self):
        """Worker processes work, then quits.

        ``work(burst=True)`` returns False on an empty queue and True
        once at least one job was processed.
        """
        fooq, barq = Queue('foo'), Queue('bar')
        w = Worker([fooq, barq])
        self.assertEqual(
            w.work(burst=True), False,
            'Did not expect any work on the queue.'
        )

        fooq.enqueue(say_hello, name='Frank')
        self.assertEqual(
            w.work(burst=True), True,
            'Expected at least some work done.'
        )
    def test_worker_all(self):
        """Worker.all() works properly.

        Registered (born) workers are listed per connection and filtered
        per queue.
        """
        foo_queue = Queue('foo')
        bar_queue = Queue('bar')

        w1 = Worker([foo_queue, bar_queue], name='w1')
        w1.register_birth()
        w2 = Worker([foo_queue], name='w2')
        w2.register_birth()

        # Both workers listen on 'foo'; only w1 listens on 'bar'
        self.assertEqual(
            set(Worker.all(connection=foo_queue.connection)),
            set([w1, w2])
        )
        self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
        self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
        w1.register_death()
        w2.register_death()
    def test_find_by_key(self):
        """Worker.find_by_key restores queues, state and job_id."""
        queues = [Queue('foo'), Queue('bar')]
        w = Worker(queues)
        w.register_death()
        w.register_birth()
        w.set_state(WorkerStatus.STARTED)
        worker = Worker.find_by_key(w.key)
        self.assertEqual(worker.queues, queues)
        self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
        self.assertEqual(worker._job_id, None)
        self.assertTrue(worker.key in Worker.all_keys(worker.connection))

        # If worker is gone, its keys should also be removed
        # (find_by_key on a deleted worker cleans up the registry entry)
        worker.connection.delete(worker.key)
        Worker.find_by_key(worker.key)
        self.assertFalse(worker.key in Worker.all_keys(worker.connection))
    def test_worker_ttl(self):
        """Worker ttl.

        Registering birth must leave a worker key with a finite TTL set.
        """
        w = Worker([])
        w.register_birth()
        [worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
        self.assertIsNotNone(self.testconn.ttl(worker_key))
        w.register_death()
    def test_work_via_string_argument(self):
        """Worker processes work fed via string arguments.

        Jobs may be enqueued by dotted import path instead of a callable.
        """
        q = Queue('foo')
        w = Worker([q])
        job = q.enqueue('tests.fixtures.say_hello', name='Frank')
        self.assertEqual(
            w.work(burst=True), True,
            'Expected at least some work done.'
        )
        self.assertEqual(job.result, 'Hi there, Frank!')
    def test_job_times(self):
        """job times are set correctly.

        enqueued_at, started_at and ended_at must all fall between the
        timestamps bracketing the work() call.
        """
        q = Queue('foo')
        w = Worker([q])
        before = utcnow()
        # Truncate microseconds: job timestamps are stored at second precision
        before = before.replace(microsecond=0)
        job = q.enqueue(say_hello)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIsNone(job.started_at)
        self.assertIsNone(job.ended_at)
        self.assertEqual(
            w.work(burst=True), True,
            'Expected at least some work done.'
        )
        self.assertEqual(job.result, 'Hi there, Stranger!')
        after = utcnow()
        job.refresh()
        self.assertTrue(
            before <= job.enqueued_at <= after,
            'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
        )
        self.assertTrue(
            before <= job.started_at <= after,
            'Not %s <= %s <= %s' % (before, job.started_at, after)
        )
        self.assertTrue(
            before <= job.ended_at <= after,
            'Not %s <= %s <= %s' % (before, job.ended_at, after)
        )
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,))
        job.save()

        # Corrupt the serialized payload so the worker cannot import the func
        job_data = job.data
        invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
        assert job_data != invalid_data
        self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEqual(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
    def test_heartbeat(self):
        """Heartbeat saves last_heartbeat."""
        q = Queue()
        w = Worker([q])
        w.register_birth()

        birth = self.testconn.hget(w.key, 'birth')
        last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
        self.assertTrue(birth is not None)
        self.assertTrue(last_heartbeat is not None)
        w = Worker.find_by_key(w.key)
        self.assertIsInstance(w.last_heartbeat, datetime)

        # worker.refresh() shouldn't fail if last_heartbeat is None
        # for compatibility reasons
        self.testconn.hdel(w.key, 'last_heartbeat')
        w.refresh()
        # worker.refresh() shouldn't fail if birth is None
        # for compatibility reasons
        self.testconn.hdel(w.key, 'birth')
        w.refresh()
    @slow
    def test_heartbeat_busy(self):
        """Periodic heartbeats while horse is busy with long jobs.

        With a 5s monitoring interval, a job of N seconds should trigger
        floor(N / 5) heartbeats while it runs.
        """
        q = Queue()
        w = Worker([q], job_monitoring_interval=5)

        for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
            job = q.enqueue(long_running_job,
                            args=(timeout,),
                            job_timeout=30,
                            result_ttl=-1)
            with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
                w.execute_job(job, q)
                self.assertEqual(mocked.call_count, expected_heartbeats)
            job = Job.fetch(job.id)
            self.assertEqual(job.get_status(), JobStatus.FINISHED)
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)

        # keep for later
        enqueued_at_date = str(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
        # Worker must clear its current-job pointer after the failure
        self.assertEqual(w.get_current_job_id(), None)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(str(job.enqueued_at), enqueued_at_date)
        self.assertIsNotNone(job.exc_info)  # should contain exc_info
    def test_statistics(self):
        """Successful and failed job counts are saved properly."""
        q = Queue()
        job = q.enqueue(div_by_zero)
        w = Worker([q])
        w.register_birth()

        self.assertEqual(w.failed_job_count, 0)
        self.assertEqual(w.successful_job_count, 0)
        self.assertEqual(w.total_working_time, 0)

        registry = StartedJobRegistry(connection=w.connection)
        # Give the job a fixed 0.75s runtime so working time is predictable
        job.started_at = utcnow()
        job.ended_at = job.started_at + timedelta(seconds=0.75)
        w.handle_job_failure(job)
        w.handle_job_success(job, q, registry)

        w.refresh()
        self.assertEqual(w.failed_job_count, 1)
        self.assertEqual(w.successful_job_count, 1)
        self.assertEqual(w.total_working_time, 1500000)  # 1.5 seconds in microseconds

        w.handle_job_failure(job)
        w.handle_job_success(job, q, registry)

        w.refresh()
        self.assertEqual(w.failed_job_count, 2)
        self.assertEqual(w.successful_job_count, 2)
        self.assertEqual(w.total_working_time, 3000000)
    def test_custom_exc_handling(self):
        """Custom exception handling.

        A handler returning False suppresses the default move-to-failed
        behaviour, while the job is still marked as failed.
        """
        def black_hole(job, *exc_info):
            # Don't fall through to default behaviour (moving to failed queue)
            return False

        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)

        w = Worker([q], exception_handlers=black_hole)
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 0)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.is_failed, True)
    def test_cancelled_jobs_arent_executed(self):
        """Cancelling jobs.

        A job whose key is deleted before the worker picks it up must not
        be executed.
        """
        SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa

        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            # errno 2 == ENOENT (file did not exist), which is fine
            if e.errno != 2:
                raise

        q = Queue()
        job = q.enqueue(create_file, SENTINEL_FILE)

        # Here, we cancel the job, so the sentinel file may not be created
        self.testconn.delete(job.key)

        w = Worker([q])
        w.work(burst=True)
        assert q.count == 0

        # Should not have created evidence of execution
        self.assertEqual(os.path.exists(SENTINEL_FILE), False)
    @slow  # noqa
    def test_timeouts(self):
        """Worker kills jobs after timeout.

        The fixture would create the sentinel file after 4s, but the 1s
        job_timeout must kill it first.
        """
        sentinel_file = '/tmp/.rq_sentinel'

        q = Queue()
        w = Worker([q])

        # Put it on the queue with a timeout value
        res = q.enqueue(create_file_after_timeout,
                        args=(sentinel_file, 4),
                        job_timeout=1)

        try:
            os.unlink(sentinel_file)
        except OSError as e:
            if e.errno == 2:
                pass

        self.assertEqual(os.path.exists(sentinel_file), False)
        w.work(burst=True)
        self.assertEqual(os.path.exists(sentinel_file), False)

        # TODO: Having to do the manual refresh() here is really ugly!
        res.refresh()
        self.assertIn('JobTimeoutException', as_text(res.exc_info))
    def test_worker_sets_result_ttl(self):
        """Ensure that Worker properly sets result_ttl for individual jobs.

        Positive ttl -> key expires; -1 -> key persists; 0 -> key is
        deleted immediately after the job finishes.
        """
        q = Queue()
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertNotEqual(self.testconn._ttl(job.key), 0)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))

        # Job with -1 result_ttl don't expire
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn._ttl(job.key), -1)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))

        # Job with result_ttl = 0 gets deleted immediately
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn.get(job.key), None)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])

        job = q.enqueue(say_hello)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)

        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)

        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
    def test_job_dependency(self):
        """Enqueue dependent jobs only if their parents don't fail."""
        q = Queue()
        w = Worker([q])

        # Successful parent: dependent job runs and finishes
        parent_job = q.enqueue(say_hello, result_ttl=0)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)

        # Failing parent: dependent job must not reach FINISHED
        parent_job = q.enqueue(div_by_zero)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
    def test_get_current_job(self):
        """Ensure worker.get_current_job() works properly.

        The current job id is mirrored into the worker's Redis hash under
        the 'current_job' field.
        """
        q = Queue()
        worker = Worker([q])
        job = q.enqueue_call(say_hello)

        self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
        worker.set_current_job_id(job.id)
        self.assertEqual(
            worker.get_current_job_id(),
            as_text(self.testconn.hget(worker.key, 'current_job'))
        )
        self.assertEqual(worker.get_current_job(), job)
    def test_custom_job_class(self):
        """Ensure Worker accepts custom job class."""
        q = Queue()
        worker = Worker([q], job_class=CustomJob)
        self.assertEqual(worker.job_class, CustomJob)
    def test_custom_queue_class(self):
        """Ensure Worker accepts custom queue class."""
        q = CustomQueue()
        worker = Worker([q], queue_class=CustomQueue)
        self.assertEqual(worker.queue_class, CustomQueue)
    def test_custom_queue_class_is_not_global(self):
        """Ensure Worker custom queue class is not global.

        Passing queue_class to one Worker must not leak into other
        instances or the class default.
        """
        q = CustomQueue()
        worker_custom = Worker([q], queue_class=CustomQueue)
        q_generic = Queue()
        worker_generic = Worker([q_generic])
        self.assertEqual(worker_custom.queue_class, CustomQueue)
        self.assertEqual(worker_generic.queue_class, Queue)
        self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
    """Ensure Worker custom job class is not global."""
    custom_queue = Queue()
    customized_worker = Worker([custom_queue], job_class=CustomJob)
    plain_queue = Queue()
    plain_worker = Worker([plain_queue])
    # Only the explicitly-configured instance uses CustomJob; the other
    # instance and the class-level default must remain untouched.
    self.assertEqual(customized_worker.job_class, CustomJob)
    self.assertEqual(plain_worker.job_class, Job)
    self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
    """Worker processes work, with forking disabled,
    then returns."""
    fooq, barq = Queue('foo'), Queue('bar')
    w = SimpleWorker([fooq, barq])
    self.assertEqual(w.work(burst=True), False,
                     'Did not expect any work on the queue.')

    job = fooq.enqueue(say_pid)
    self.assertEqual(w.work(burst=True), True,
                     'Expected at least some work done.')
    # SimpleWorker executes jobs in-process, so the job must report the
    # test's own PID (i.e. no work-horse fork happened).
    self.assertEqual(job.result, os.getpid(),
                     'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
    """SimpleWorker's key must last longer than job.timeout when working"""
    queue = Queue('foo')
    worker = SimpleWorker([queue])
    job_timeout = 300
    # The save_key_ttl fixture presumably records the worker key's TTL
    # into job.meta['ttl'] (see the assertion below).
    job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
    worker.work(burst=True)
    job.refresh()
    # The heartbeat TTL observed during execution must exceed the job's
    # own timeout, otherwise the worker key could expire mid-job.
    self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
    """Prepare job execution does the necessary bookkeeping."""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)
    worker = Worker([queue])
    worker.prepare_job_execution(job)

    # Updates working queue: the job must appear in StartedJobRegistry.
    registry = StartedJobRegistry(connection=self.testconn)
    self.assertEqual(registry.get_job_ids(), [job.id])

    # Updates worker statuses: state flips to busy, current job recorded.
    self.assertEqual(worker.get_state(), 'busy')
    self.assertEqual(worker.get_current_job_id(), job.id)
def test_work_unicode_friendly(self):
    """Worker processes work with unicode description, then quits."""
    q = Queue('foo')
    w = Worker([q])
    job = q.enqueue('tests.fixtures.say_hello', name='Adam',
                    description='你好 世界!')
    self.assertEqual(w.work(burst=True), True,
                     'Expected at least some work done.')
    # Result and description must round-trip through Redis unmangled.
    self.assertEqual(job.result, 'Hi there, Adam!')
    self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
    """Worker process work with unicode or str other than pure ascii content,
    logging work properly"""
    q = Queue("foo")
    w = Worker([q])

    # Unicode argument and description in a normally-returning job.
    job = q.enqueue('tests.fixtures.say_hello', name='阿达姆',
                    description='你好 世界!')
    w.work(burst=True)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)

    # Fixture whose return value itself is a unicode string.
    job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
                    description='你好 世界!')
    w.work(burst=True)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
    """Test Pause Worker Execution"""
    SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa

    try:
        # Remove the sentinel if it is leftover from a previous test run
        os.remove(SENTINEL_FILE)
    except OSError as e:
        # errno 2 is ENOENT ("no such file") and is expected here;
        # any other OSError is a genuine failure.
        if e.errno != 2:
            raise

    q = Queue()
    q.enqueue(create_file, SENTINEL_FILE)

    w = Worker([q])

    suspend(self.testconn)

    w.work(burst=True)
    assert q.count == 1

    # Should not have created evidence of execution
    self.assertEqual(os.path.exists(SENTINEL_FILE), False)

    resume(self.testconn)
    w.work(burst=True)
    assert q.count == 0
    self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
    """A suspension with a TTL expires on its own and work then resumes."""
    q = Queue()
    for _ in range(5):
        q.enqueue(do_nothing)

    w = Worker([q])

    # This suspends workers for working for 2 second
    suspend(self.testconn, 2)

    # So when this burst of work happens the queue should remain at 5
    w.work(burst=True)
    assert q.count == 5

    sleep(3)

    # The suspension should be expired now, and a burst of work should now clear the queue
    w.work(burst=True)
    assert q.count == 0
def test_worker_hash_(self):
    """Workers are hashed by their .name attribute"""
    queue = Queue('foo')
    first = Worker([queue], name="worker1")
    second = Worker([queue], name="worker2")
    duplicate_of_first = Worker([queue], name="worker1")
    # Two workers sharing a name must collapse to one set entry.
    self.assertEqual(len({first, second, duplicate_of_first}), 2)
def test_worker_sets_birth(self):
    """Ensure worker correctly sets worker birth date."""
    queue = Queue()
    worker = Worker([queue])

    worker.register_birth()
    recorded_birth = worker.birth_date
    # birth_date must be populated and deserialize back to a datetime.
    self.assertIsNotNone(recorded_birth)
    self.assertEqual(type(recorded_birth).__name__, 'datetime')
def test_worker_sets_death(self):
    """Ensure worker correctly sets worker death date."""
    queue = Queue()
    worker = Worker([queue])

    worker.register_death()
    recorded_death = worker.death_date
    # death_date must be populated and be a real datetime instance.
    self.assertIsNotNone(recorded_death)
    self.assertIsInstance(recorded_death, datetime)
def test_clean_queue_registries(self):
    """worker.clean_registries sets last_cleaned_at and cleans registries."""
    foo_queue = Queue('foo', connection=self.testconn)
    foo_registry = StartedJobRegistry('foo', connection=self.testconn)
    # NOTE(review): positional (score, member) zadd is the redis-py < 3.0
    # calling convention; redis-py >= 3.0 requires a mapping.
    self.testconn.zadd(foo_registry.key, 1, 'foo')
    self.assertEqual(self.testconn.zcard(foo_registry.key), 1)

    bar_queue = Queue('bar', connection=self.testconn)
    bar_registry = StartedJobRegistry('bar', connection=self.testconn)
    self.testconn.zadd(bar_registry.key, 1, 'bar')
    self.assertEqual(self.testconn.zcard(bar_registry.key), 1)

    worker = Worker([foo_queue, bar_queue])
    self.assertEqual(worker.last_cleaned_at, None)
    worker.clean_registries()
    self.assertNotEqual(worker.last_cleaned_at, None)
    # The stale entries (score 1 = long expired) were purged from both
    # queues' registries.
    self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
    self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
def test_should_run_maintenance_tasks(self):
    """Workers should run maintenance tasks on startup and every hour."""
    queue = Queue(connection=self.testconn)
    worker = Worker(queue)
    # Never cleaned -> maintenance is due immediately on startup.
    self.assertTrue(worker.should_run_maintenance_tasks)

    worker.last_cleaned_at = utcnow()
    self.assertFalse(worker.should_run_maintenance_tasks)
    # 3700 s is just over one hour, so maintenance is due again.
    worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
    self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
    """Worker calls clean_registries when run."""
    queue = Queue(connection=self.testconn)
    registry = StartedJobRegistry(connection=self.testconn)
    # Seed a stale registry entry (redis-py < 3.0 positional zadd form).
    self.testconn.zadd(registry.key, 1, 'foo')

    worker = Worker(queue, connection=self.testconn)
    worker.work(burst=True)
    # The stale entry must have been cleaned up by the work() run.
    self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
    """Dependencies added while the job gets finished shouldn't get lost.

    Patches Queue.enqueue_dependents so a new dependent is registered
    right AFTER the original dependents were enqueued, reproducing the
    race from issue #739.
    """
    # This patches the enqueue_dependents to enqueue a new dependency AFTER
    # the original code was executed.
    orig_enqueue_dependents = Queue.enqueue_dependents

    def new_enqueue_dependents(self, job, *args, **kwargs):
        orig_enqueue_dependents(self, job, *args, **kwargs)
        if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
            Queue._add_enqueue = None
            Queue().enqueue_call(say_hello, depends_on=job)

    Queue.enqueue_dependents = new_enqueue_dependents
    try:
        q = Queue()
        w = Worker([q])
        with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
            parent_job = q.enqueue(say_hello, result_ttl=0)
            Queue._add_enqueue = parent_job
            job = q.enqueue_call(say_hello, depends_on=parent_job)
            w.work(burst=True)
            job = Job.fetch(job.id)
            self.assertEqual(job.get_status(), JobStatus.FINISHED)

            # The created spy checks two issues:
            # * before the fix of #739, 2 of the 3 jobs were executed due
            #   to the race condition
            # * during the development another issue was fixed:
            #   due to a missing pipeline usage in Queue.enqueue_job, the job
            #   which was enqueued before the "rollback" was executed twice.
            #   So before that fix the call count was 4 instead of 3
            self.assertEqual(mocked.call_count, 3)
    finally:
        # Bug fix: restore the class attribute so the monkeypatch does
        # not leak into every other test that touches Queue.
        Queue.enqueue_dependents = orig_enqueue_dependents
def test_self_modification_persistence(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack."""
    q = Queue()
    # Also make sure that previously existing metadata
    # persists properly
    job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
                    args=[{'baz': 10, 'newinfo': 'waka'}])

    w = Worker([q])
    w.work(burst=True)

    job_check = Job.fetch(job.id)
    # 'baz' was overwritten by the job, 'newinfo' added, 'foo' untouched.
    self.assertEqual(set(job_check.meta.keys()),
                     set(['foo', 'baz', 'newinfo']))
    self.assertEqual(job_check.meta['foo'], 'bar')
    self.assertEqual(job_check.meta['baz'], 10)
    self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack -- even if the job errored"""
    q = Queue()
    failed_q = get_failed_queue()
    # Also make sure that previously existing metadata
    # persists properly
    job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
                    args=[{'baz': 10, 'newinfo': 'waka'}])

    w = Worker([q])
    w.work(burst=True)

    # Postconditions: the job errored, landed on the failed queue, and
    # the worker no longer claims it.
    self.assertEqual(q.count, 0)
    self.assertEqual(failed_q.count, 1)
    self.assertEqual(w.get_current_job_id(), None)

    job_check = Job.fetch(job.id)
    # Meta written before the error must still be visible after fetch.
    self.assertEqual(set(job_check.meta.keys()),
                     set(['foo', 'baz', 'newinfo']))
    self.assertEqual(job_check.meta['foo'], 'bar')
    self.assertEqual(job_check.meta['baz'], 10)
    self.assertEqual(job_check.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
    """Check that log_result_lifespan True causes job lifespan to be logged."""
    q = Queue()
    w = Worker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    # assert_called_with only inspects the most recent call ...
    mock_logger_info.assert_called_with('Result is kept for 10 seconds')
    # ... so also scan the full call history for the expected message.
    self.assertIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
    """Check that log_result_lifespan False causes job lifespan to not be logged."""
    q = Queue()

    class TestWorker(Worker):
        # Class-level flag that suppresses the TTL log line.
        log_result_lifespan = False

    w = TestWorker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
def kill_worker(pid, double_kill):
    """Deliver SIGTERM to *pid* after a short delay; twice when *double_kill*."""
    shots = 2 if double_kill else 1
    for _ in range(shots):
        # The first wait lets the main process start its worker; the second
        # gives the worker time to switch to its "cold shutdown" handler.
        time.sleep(0.5)
        os.kill(pid, signal.SIGTERM)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    """After *time_to_wait* seconds, SIGKILL process *pid* (un-catchable),
    simulating a work horse dying mid-job."""
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
    # Mixin that aborts a hung test via SIGALRM, so a worker that ignores
    # shutdown signals cannot wedge the whole suite.
    def setUp(self):
        # we want tests to fail if signal are ignored and the work remain
        # running, so set a signal to kill them after X seconds
        self.killtimeout = 15
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(self.killtimeout)

    def _timeout(self, signal, frame):
        # SIGALRM handler: turn a hang into an immediate test failure.
        raise AssertionError(
            "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
        )
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """Warm/cold shutdown behaviour, driven by real SIGTERM signals sent
    to this process from a helper subprocess."""

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # kill_worker SIGTERMs this (the main) process after ~0.5 s.
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(1)
        # An idle worker exits its loop directly, so no stop was ever
        # left pending.
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)

        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(2)
        self.assertFalse(p.is_alive())
        self.assertTrue(w._stop_requested)
        # The in-flight job was allowed to finish despite the signal.
        self.assertTrue(os.path.exists(sentinel_file))

        self.assertIsNotNone(w.shutdown_requested_date)
        self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """Busy worker shuts down immediately on double SIGTERM signal"""
        fooq = Queue('foo')
        w = Worker(fooq)

        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        # double_kill=True sends a second SIGTERM, forcing the cold
        # shutdown path, which raises SystemExit in the worker loop.
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()

        self.assertRaises(SystemExit, w.work)

        p.join(1)
        self.assertTrue(w._stop_requested)
        # The in-flight job was aborted before it could create the file.
        self.assertFalse(os.path.exists(sentinel_file))

        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_work_horse_death_sets_job_failed(self):
        """worker with an ongoing job whose work horse dies unexpectedly (before
        completing the job) should set the job's status to FAILED
        """
        fooq = Queue('foo')
        failed_q = get_failed_queue()
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # SIGKILL the horse shortly after the fork, so the parent observes
        # an abnormal child exit while the job is still running.
        p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
        p.start()
        w.monitor_work_horse(job)
        job_status = job.get_status()
        p.join(1)
        self.assertEqual(job_status, JobStatus.FAILED)
        self.assertEqual(failed_q.count, 1)
        self.assertEqual(fooq.count, 0)
def schedule_access_self():
    # Helper job: enqueues another job from inside a worker, exercising
    # job-to-job scheduling over the current connection.
    q = Queue('default', connection=get_current_connection())
    q.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
    """Run the real `rqworker` CLI in a subprocess against the test Redis."""

    def setUp(self):
        super(TestWorkerSubprocess, self).setUp()
        # Point the CLI at the same Redis database the test fixture uses.
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

    def test_run_empty_queue(self):
        """Run the worker in its own process with an empty queue"""
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])

    def test_run_access_self(self):
        """Schedule a job, then run the worker as subprocess"""
        q = Queue()
        q.enqueue(access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        # The job ran cleanly and the queue drained.
        assert get_failed_queue().count == 0
        assert q.count == 0

    @skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
    def test_run_scheduled_access_self(self):
        """Schedule a job that schedules a job, then run the worker as subprocess"""
        q = Queue()
        q.enqueue(schedule_access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        assert get_failed_queue().count == 0
        assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """HerokuWorker shutdown semantics, driven by SIGRTMIN; a sandbox
    directory records the dummy worker's progress and captured stderr."""

    def setUp(self):
        super(HerokuWorkerShutdownTestCase, self).setUp()
        self.sandbox = '/tmp/rq_shutdown/'
        os.makedirs(self.sandbox)

    def tearDown(self):
        shutil.rmtree(self.sandbox, ignore_errors=True)

    @slow
    def test_immediate_shutdown(self):
        """Heroku work horse shutdown with immediate (0 second) kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
        p.start()
        time.sleep(0.5)

        os.kill(p.pid, signal.SIGRTMIN)

        p.join(2)
        self.assertEqual(p.exitcode, 1)
        # The worker started but was interrupted before it could finish.
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_1_sec_shutdown(self):
        """Heroku work horse shutdown with 1 second kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
        p.start()
        time.sleep(0.5)

        os.kill(p.pid, signal.SIGRTMIN)
        time.sleep(0.1)
        # With a 1 s grace period the process is still alive right after
        # the signal ...
        self.assertEqual(p.exitcode, None)
        p.join(2)
        # ... but exits once the SIGALRM-based deadline fires.
        self.assertEqual(p.exitcode, 1)

        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGALRM)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_shutdown_double_sigrtmin(self):
        """Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
        p.start()
        time.sleep(0.5)

        os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while otherwise the second signal won't be processed.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGRTMIN)

        p.join(2)
        self.assertEqual(p.exitcode, 1)

        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    def test_handle_shutdown_request(self):
        """Mutate HerokuWorker so _horse_pid refers to an artificial process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')

        path = os.path.join(self.sandbox, 'shouldnt_exist')
        p = Process(target=create_file_after_timeout, args=(path, 2))
        p.start()
        self.assertEqual(p.exitcode, None)

        w._horse_pid = p.pid
        w.handle_warm_shutdown_request()
        p.join(2)
        # -34: the child was terminated by signal 34 (SIGRTMIN on Linux).
        self.assertEqual(p.exitcode, -34)
        self.assertFalse(os.path.exists(path))

    def test_handle_shutdown_request_no_horse(self):
        """Mutate HerokuWorker so _horse_pid refers to non existent process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        # Must not raise even though the pid does not exist.
        w._horse_pid = 19999
        w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
    """Exception handling must tolerate non-ASCII exception messages."""

    def setUp(self):
        super(TestExceptionHandlerMessageEncoding, self).setUp()
        self.worker = Worker("foo")
        self.worker._exc_handlers = []
        self.worker.failed_queue = Mock()
        # Mimic how exception info is actually passed forwards
        try:
            raise Exception(u"💪")
        # deliberately bare except: we only need sys.exc_info() populated
        except:
            self.exc_info = sys.exc_info()

    def test_handle_exception_handles_non_ascii_in_exception_message(self):
        """worker.handle_exception doesn't crash on non-ascii in exception message."""
        self.worker.handle_exception(Mock(), *self.exc_info)

    def test_move_to_failed_queue_handles_non_ascii_in_exception_message(self):
        """Test that move_to_failed_queue doesn't crash on non-ascii in exception message."""
        self.worker.move_to_failed_queue(Mock(), *self.exc_info)
|
views.py | """
Views used by the actions app.
"""
import threading

from django.contrib.auth.decorators import user_passes_test
# NOTE(review): this `messages` import is immediately shadowed by
# `from django.contrib import messages` below and appears unintentional.
from django.core.checks import messages
from django.core.files.base import ContentFile
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils import timezone

from .models import ActionLog
from .scripts import update_beatmap_action_script
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
def actions(request):
    """
    View for Action menu.
    This view can only access by superuser and staff.
    """
    context = {
        'action_log': ActionLog.objects.all().order_by('-id'),
        # status == 1 means "running"; the template uses this flag to mark
        # the "update all beatmaps" action as busy while a run is active.
        'update_beatmap_running': ActionLog.objects.filter(name="Update all beatmaps metadata", status=1).exists(),
    }
    return render(request, 'actions/actions.html', context)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
def update_beatmap_action(request):
    """
    View for activate the new runner for running update_beatmap_action_script function.
    This view can only activate by superuser and staff.
    """
    # If this action is already running, return error message.
    if ActionLog.objects.filter(name="Update all beatmaps metadata", status=1).exists():
        messages.error(request, "This action is already running!")
        return redirect('actions')
    # Create a new action log for binding with the worker.
    action_log = ActionLog()
    action_log.name = "Update all beatmaps metadata"
    action_log.running_text = "Start working thread..."
    action_log.status = 1
    action_log.start_user = request.user
    action_log.save()
    action_log.log.save(f"log_{action_log.id}.log", ContentFile(f'# Log for worker ID : {action_log.id}\n'))
    # Start a new daemon thread to work on this action so the request
    # returns immediately.  Fix: assign the `daemon` attribute instead of
    # calling Thread.setDaemon(), which is deprecated since Python 3.10
    # and removed in 3.13.
    thread_worker = threading.Thread(target=update_beatmap_action_script, args=[action_log])
    thread_worker.daemon = True
    thread_worker.start()
    messages.success(request, f"Start your cute bot successfully! (Log ID : {action_log.id})")
    return redirect('actions')
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
def check_action_log(request, log_id):
    """
    View for API that request in Actions page to check the status of the action log.
    It will return the value that used in live updating the Action status.
    This view can only access by superuser and staff.
    """
    action = get_object_or_404(ActionLog, id=log_id)
    try:
        if action.status == 1 or action.status == 0:
            # Running (1) or idle (0): elapsed time from start until now, in seconds.
            duration = (timezone.now() - action.time_start).total_seconds()
        elif action.status == 2:
            # Finished: total run time (finished - start) in seconds.
            duration = (action.time_finish - action.time_start).total_seconds()
        else:
            # To avoid error in case that task is failed to run, it will return as unknown.
            duration = "Unknown"
    except TypeError:
        # A finished action missing its finish timestamp also reports Unknown.
        duration = "Unknown"
    # If the duration is known, convert it to the readable HH:MM:SS format.
    if duration != "Unknown":
        hours = duration//3600
        duration = duration - (hours*3600)
        minutes = duration//60
        seconds = duration - (minutes*60)
        duration = '{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))
    # Return the duration and status of the action (GET only).
    if request.method == "GET":
        return JsonResponse({"running_text": action.running_text, "status": action.status, "duration": duration}, status=200)
    # Any other method gets an empty 400 response.
    return JsonResponse({}, status=400)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
def delete_action_log(request, log_id):
    """
    Delete one action log entry.
    Only superusers and staff may access this view.
    """
    action = get_object_or_404(ActionLog, id=log_id)
    # Refuse to delete logs that are idle (status 0) or running (status 1).
    if action.status in (0, 1):
        messages.error(request, "Cannot delete the Action log that is running or in idle state!")
    else:
        action.delete()
        messages.success(request, "Delete action log successfully!")
    return redirect('actions')
|
shape_intersection_loader.py | import numpy as np
import time
import random
import math
import os
import sys
from multiprocessing import Process, Queue
import _pickle as cPickle
import io
import pdb
import gizeh
def draw(shape, pixel, filename=None):
    """Rasterize one shape spec onto a pixel x pixel black canvas.

    shape[0] selects the primitive: 1 = line segment (x1, y1, x2, y2),
    2 = circle (x, y, r), 3 = rectangle (x, y, lx, ly).  Coordinates are
    given on a 10-unit grid and scaled to the requested resolution.
    Returns the red channel of the rendered image as a 2-D array; when
    *filename* is given, also writes "<filename>.png".
    """
    scale = pixel / 10
    surface = gizeh.Surface(width=pixel, height=pixel, bg_color=(0, 0, 0))  # in pixels
    kind = shape[0]
    if kind == 1:
        # Line segment from (x1, y1) to (x2, y2).
        gizeh.polyline(
            points=[(scale * shape[1], scale * shape[2]),
                    (scale * shape[3], scale * shape[4])],
            stroke_width=1,
            stroke=(1, 1, 1)).draw(surface)
    if kind == 2:
        # Circle centered at (x, y) with radius r.
        gizeh.circle(r=scale * shape[3],
                     xy=[scale * shape[1], scale * shape[2]],
                     stroke=(1, 1, 1), stroke_width=1).draw(surface)
    if kind == 3:
        # Axis-aligned rectangle: gizeh takes the center, so offset the
        # top-left corner by half the side lengths.
        gizeh.rectangle(lx=scale * shape[3], ly=scale * shape[4],
                        xy=(scale * shape[1] + scale * shape[3] / 2,
                            scale * shape[2] + scale * shape[4] / 2),
                        stroke=(1, 1, 1), stroke_width=1, angle=0).draw(surface)
    img = surface.get_npimage()[:, :, 0]  # returns a (width x height) numpy array
    if filename is not None:
        surface.write_to_png(filename + ".png")
    return img
def int2onehot(lst, dim):
    """Convert a sequence of class indices into a (len(lst), dim) one-hot array.

    Args:
        lst: sequence of integer class indices, each in [0, dim).
        dim: number of classes (width of the one-hot encoding).

    Returns:
        np.ndarray of dtype float64 with exactly one 1.0 per row.
    """
    array = np.zeros((len(lst), dim), dtype=np.double)
    # Bug fix: the original indexed with lst(i) -- a call expression --
    # which raises TypeError; it must be a subscript, lst[i].
    for i, label in enumerate(lst):
        array[i][label] = 1
    return array
class Loader(object):
    """Asynchronous batch loader: a daemon child process unpickles the
    dataset, renders each shape to an image channel, and feeds prepared
    batches through a bounded multiprocessing queue."""

    def __init__(self, path, batch_size, mask_range, pixel, ques_len, maxsize=20, embedding_size=256, shuffle=True):
        self.path = path
        self.shuffle = shuffle
        self.ques_len = ques_len
        self.batch_size = batch_size
        self.epoch = 1
        self.batch_idx = -1  # conj, stmt pairs supply in current epoch.
        self.total_iter = -1  # total iteration
        self.maxsize = maxsize  # queue capacity: batches buffered ahead of the consumer
        self.queue = Queue(self.maxsize)
        self.reader = Process(target=self.read)
        self.reader.daemon = True  # don't let the reader outlive the main process
        self.total_size = 12800  # assumed dataset size -- TODO confirm against the file
        self.total_batch = int(self.total_size / self.batch_size)
        self.pixel = pixel
        self.mask_range = mask_range
        self.embedding_size = embedding_size
        self.count = 0
        self.draw = True
        self.reader.start()

    def next_batch(self):
        # Blocks until the reader process has produced a batch.
        data = self.queue.get()
        # NOTE(review): read() never enqueues None, so this epoch-rollover
        # branch appears unreachable as written -- verify intent.
        if data is None:
            self.epoch += 1
            self.batch_idx = 0
        else:
            self.batch_idx += 1
            self.total_iter += 1
        return data

    def read(self):
        # Runs in the child process: loop over the pickled dataset forever,
        # reshuffling each pass when enabled.
        with open(self.path, 'rb') as f:
            a = cPickle.load(f)
        while True:
            if self.shuffle:
                random.shuffle(a)
            bs = self.batch_size
            l = len(a)
            # print('file size:', l)
            for i in range(l // bs):
                # put() blocks once maxsize batches are pending.
                self.queue.put(self.prepare(a[i * bs: (i + 1) * bs]))

    def prepare(self, data):
        # Build one batch. Each sample holds up to 12 shape specs of 5
        # numbers each (type code plus coordinates/sizes) and a scalar label.
        X = np.zeros((self.batch_size, 12, 5))
        X_length = np.zeros((self.batch_size, 12))
        Y = np.zeros((self.batch_size, ))
        num_sen = np.zeros((self.batch_size, ))
        mask = np.zeros((self.batch_size, 12))
        Ques = np.zeros((self.batch_size, self.ques_len, self.embedding_size))  # no information at all
        Ques_length = np.ones((self.batch_size, )) * self.ques_len
        img = np.zeros((self.batch_size, self.pixel, self.pixel, 12))
        loss_mask = np.zeros((self.batch_size, self.pixel, self.pixel, 12))
        l = np.array(list(map(lambda x: len(x[0]), data)))
        for i in range(self.batch_size):
            single_X = np.array(data[i][0])
            X[i, :single_X.shape[0], :single_X.shape[1]] = single_X
            X_length[i,:single_X.shape[0]] = 5
            Y[i] = data[i][1]
            num_sen[i] = single_X.shape[0]
            mask[i,:single_X.shape[0]] = 1
            # image channels for middle supervision
            for j in range(l[i]):
                # Normalize the rendered 0-255 image to [0, 1].
                img[i, :, :, j] = draw(data[i][0][j], self.pixel) / 255
                # Up-weight on-shape pixels by mask_range in the loss.
                loss_mask[i, :, :, j] = (self.mask_range - 1) * img[i, :, :, j] + 1
        return X, X_length, Y, num_sen, mask, Ques, Ques_length, img, loss_mask

    def destruct(self):
        # Force-kill the reader process (it loops forever otherwise).
        self.reader.terminate()
|
test_failure.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import (
relevant_errors,
wait_for_errors,
RayTestTimeoutException,
)
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
def test_failed_task(ray_start_regular):
    """Exceptions raised in remote tasks surface as push errors and via ray.get."""
    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_return_vals=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    # Fire-and-forget tasks report failure through the error table.
    throw_exception_fct1.remote()
    throw_exception_fct1.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
    for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
        msg = task.get("message")
        assert "Test function 1 intentionally failed." in msg

    x = throw_exception_fct2.remote()
    try:
        ray.get(x)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        # ray.get should throw an exception.
        assert False

    # Every object ref of a multi-return task carries the same failure.
    x, y, z = throw_exception_fct3.remote(1.0)
    for ref in [x, y, z]:
        try:
            ray.get(ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            # ray.get should throw an exception.
            assert False

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        # The re-raised error keeps the user's exception type AND is a
        # RayTaskError, so callers can catch either.
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        # ray.get should throw an exception.
        assert False
def test_fail_importing_remote_function(ray_start_2_cpus):
    """A remote function that can't be unpickled on workers reports errors."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define a function that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    def g():
        try:
            module.temporary_python_file()
        except Exception:
            # This test is not concerned with the error from running this
            # function. Only from unpickling the remote function.
            pass

    # Invoke the function so that the definition is exported.
    g.remote()
    wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
    assert len(errors) >= 2, errors
    # The temporary module exists only on the driver, so workers fail to
    # import it while deserializing the function.
    assert "No module named" in errors[0]["message"]
    assert "No module named" in errors[1]["message"]

    # Check that if we try to call the function it throws an exception and
    # does not hang.
    for _ in range(10):
        with pytest.raises(Exception):
            ray.get(g.remote())

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus):
    """Failures in run_function_on_all_workers are pushed as errors."""
    def f(worker):
        # Only raise on actual workers, not on the driver.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(f)
    # One error per worker is expected (the fixture starts 2 CPUs).
    wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
    # Check that the error message is in the task info.
    errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert "Function to run failed." in errors[0]["message"]
    assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
    """An actor class that can't be unpickled reports registration and task errors."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo(object):
        def __init__(self):
            self.x = module.temporary_python_file()

        def get_val(self):
            return 1

    # There should be no errors yet.
    assert len(ray.errors()) == 0

    # Create an actor.
    foo = Foo.remote()

    # Wait for the error to arrive.
    wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
    assert "No module named" in errors[0]["message"]

    # Wait for the error from when the __init__ tries to run.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0]["message"])

    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception):
        ray.get(foo.get_val.remote())

    # Wait for the error from when the call to get_val.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[1]["message"])

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
    """A failing actor constructor pushes errors for __init__ and later methods."""
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor(object):
        def __init__(self):
            raise Exception(error_message1)

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed constructor.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message1 in errors[0]["message"]

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    # The method error also carries the constructor failure, because the
    # actor never finished initializing.
    assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
    """An actor method that raises pushes exactly one task error."""
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor(object):
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
    """Wrong arities and unknown attributes on an actor raise client-side."""

    @ray.remote
    class Actor(object):
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Constructor arity is validated: too few arguments ...
    with pytest.raises(Exception):
        Actor.remote()
    # ... and too many arguments both fail.
    with pytest.raises(Exception):
        Actor.remote(1, 2)
    # The correct arity succeeds.
    actor = Actor.remote(1)

    # Method arity is validated the same way.
    with pytest.raises(Exception):
        actor.get_val.remote()
    with pytest.raises(Exception):
        actor.get_val.remote(1, 2)

    # Unknown methods raise AttributeError with or without .remote.
    with pytest.raises(AttributeError):
        actor.nonexistent_method()
    with pytest.raises(AttributeError):
        actor.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
    """A worker that fails *after* finishing its task pushes a crash error."""
    @ray.remote
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        ray.experimental.signal.reset = None
    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()
    wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
    """A task that kills its own worker raises RayWorkerError and pushes
    exactly one worker-died error to the driver."""
    # Define a remote function that will kill the worker that runs it.
    @ray.remote(max_retries=0)
    def f():
        eval("exit()")
    with pytest.raises(ray.exceptions.RayWorkerError):
        ray.get(f.remote())
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
    """Results from an actor whose process dies are unusable everywhere."""
    @ray.remote
    class Actor(object):
        def kill(self):
            eval("exit()")
    @ray.remote
    def consume(x):
        pass
    a = Actor.remote()
    # The kill task's result never materializes; wait only briefly for it.
    [obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
    # Fetching the dead actor's output raises directly ...
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    # ... and also fails when passed as a dependency to another task.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
    """Tasks queued both before and after an actor's death all fail."""
    @ray.remote(max_reconstructions=0)
    class Actor(object):
        def getpid(self):
            return os.getpid()
        def sleep(self):
            time.sleep(1)
    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    # Queue work, kill the actor process mid-stream, then queue more work.
    tasks1 = [a.sleep.remote() for _ in range(10)]
    os.kill(pid, 9)
    time.sleep(0.1)
    tasks2 = [a.sleep.remote() for _ in range(10)]
    # Every pending result from both batches must raise.
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Calling into an actor that was killed while idle raises on ray.get."""

    @ray.remote(max_reconstructions=0)
    class Actor(object):
        def getpid(self):
            return os.getpid()

    actor = Actor.remote()
    worker_pid = ray.get(actor.getpid.remote())

    # Kill the idle actor process and give the system a moment to notice.
    os.kill(worker_pid, 9)
    time.sleep(0.1)

    # A task submitted afterwards must fail when its result is fetched.
    pending = actor.getpid.remote()
    with pytest.raises(Exception):
        ray.get(pending)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
    """Actors collected by scope or killed via __ray_terminate__ produce no
    driver errors."""
    @ray.remote
    class Actor(object):
        pass
    # The first handle goes out of scope when rebound; the second is killed
    # explicitly. Neither event should be reported as an error.
    a = Actor.remote()
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    # Fixed typo in the failure message ("propogated" -> "propagated").
    assert len(
        ray.errors()) == 0, ("Should not have propagated an error - {}".format(
            ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
    """Forcing reconstruction of an evicted task result should push a
    put-reconstruction error to the driver (currently skipped)."""
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_arg_task.remote()
    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """Same as test_put_error1, but the evicted object comes from an explicit
    ray.put instead of a task result (currently skipped)."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_task.remote()
    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
    """Connecting with a mismatched ray version pushes a version error."""
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"
    try:
        ray.init(num_cpus=1)
        wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
    finally:
        # Always restore the real version, even if the assertions above fail,
        # so later tests in the same process are not poisoned.
        ray.__version__ = ray_version
def test_warning_monitor_died(ray_start_2_cpus):
    """A crashed monitor process pushes a MONITOR_DIED error to the driver."""
    @ray.remote
    def f():
        pass
    # Wait for the monitor process to start.
    ray.get(f.remote())
    time.sleep(1)
    # Cause the monitor to raise an exception by pushing a malformed message to
    # Redis. This will probably kill the raylet and the raylet_monitor in
    # addition to the monitor.
    fake_id = 20 * b"\x00"
    malformed_message = "asdf"
    redis_client = ray.worker.global_worker.redis_client
    redis_client.execute_command(
        "RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.Value("HEARTBEAT_BATCH"),
        ray.gcs_utils.TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB"), fake_id,
        malformed_message)
    wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
    """Closing over an object larger than the pickle warning size emits one
    warning per exported definition (plain task, then actor)."""
    import ray.ray_constants as ray_constants
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
    @ray.remote
    def f():
        large_object
    # Invoke the function so that the definition is exported.
    f.remote()
    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)
    @ray.remote
    class Foo(object):
        def __init__(self):
            large_object
    Foo.remote()
    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
@pytest.mark.skipif(RAY_FORCE_DIRECT, reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(shutdown_only):
    """A blocked task holding the only CPU triggers a deadlock warning."""
    # Check that we get warning messages for infeasible tasks.
    ray.init(num_cpus=1)
    @ray.remote(num_cpus=1)
    class Foo(object):
        def f(self):
            return 0
    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(2)]
        for a in actors:
            ray.get(a.f.remote())
    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    wait_for_errors(ray_constants.RESOURCE_DEADLOCK_ERROR, 1, timeout=30)
def test_warning_for_infeasible_tasks(ray_start_regular):
    """Submitting work whose resource demands can never be met warns."""

    # Neither a GPU nor the "Custom" resource exists in this cluster.
    @ray.remote(num_gpus=1)
    def f():
        pass

    @ray.remote(resources={"Custom": 1})
    class Foo(object):
        pass

    # Infeasible plain task -> first warning.
    f.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)

    # Infeasible actor placement -> second warning.
    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    """Placing an actor on a 0-CPU cluster yields an infeasibility warning.

    Even though the actor creation task itself requires no CPUs, placement
    still needs a node with CPU capacity, so the request is infeasible.
    """
    ray.init(num_cpus=0)

    @ray.remote
    class Foo(object):
        pass

    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
    """Spawning far more blocked actors than CPUs warns about pool size."""
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    class Foo(object):
        def __init__(self):
            time.sleep(1000)

    # Each batch of never-finishing actors grows the worker pool enough to
    # trip the "worker pool too large" warning once more.
    [Foo.remote() for _ in range(num_cpus * 3)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
    [Foo.remote() for _ in range(num_cpus)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """Nested blocked tasks that force extra workers trigger the pool-size
    warning."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    @ray.remote
    def f():
        time.sleep(1000)
        return 1
    @ray.remote
    def h():
        time.sleep(1)
        ray.get(f.remote())
    @ray.remote
    def g():
        # Sleep so that the f tasks all get submitted to the scheduler after
        # the g tasks.
        time.sleep(1)
        ray.get(h.remote())
    [g.remote() for _ in range(num_cpus * 4)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
@pytest.mark.skipif(
    sys.version_info < (3, 0), reason="This test requires Python 3.")
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Exporting the same remote function/actor past the duplicate threshold
    logs a warning through ray.import_thread's logger."""
    ray.init(num_cpus=1)
    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1
        return ray.get(g.remote())
    # Export up to one below the threshold, then capture the log output of
    # the final export that crosses it.
    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())
    import io
    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)
    # TODO(rkn): It's terrible to have to rely on this implementation detail,
    # the fact that the warning comes from ray.import_thread.logger. However,
    # I didn't find a good way to capture the output for all loggers
    # simultaneously.
    ray.import_thread.logger.addHandler(ch)
    ray.get(create_remote_function.remote())
    # Poll for up to 10 seconds for the warning to appear in the log.
    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
    ray.import_thread.logger.removeHandler(ch)
    assert "remote function" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
    # Now test the same thing but for actors.
    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo(object):
            pass
        Foo.remote()
    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())
    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)
    # TODO(rkn): As mentioned above, it's terrible to have to rely on this
    # implementation detail.
    ray.import_thread.logger.addHandler(ch)
    ray.get(create_actor_class.remote())
    # Same 10-second poll as above, now for the actor warning.
    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
    ray.import_thread.logger.removeHandler(ch)
    assert "actor" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """The Ray redis module validates arguments and rejects malformed calls."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2
    # Helper: the command must fail with an error matching expecting_message.
    def run_failure_test(expecting_message, *command):
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            client = redis.StrictRedis(
                host=address[0],
                port=int(address[1]),
                password=ray_constants.REDIS_DEFAULT_PASSWORD)
            client.execute_command(*command)
    # Helper: the command must succeed.
    def run_one_command(*command):
        client = redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)
        client.execute_command(*command)
    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
    """Killing raylets produces one REMOVED_NODE warning per dead node."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    node_ids = {item["NodeID"] for item in ray.nodes()}
    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)
    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()
    # Check that we get warning messages for both raylets.
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {
        item["message"].split(" ")[5]
        for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    }
    assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """ray.get on an unknown object raises once the raylet is killed."""

    def _kill_raylet_soon():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    killer = threading.Thread(target=_kill_raylet_soon)
    killer.start()
    # The get blocks on a random (never-created) object until the raylet
    # dies, at which point it must raise.
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(ray.ObjectID.from_random())
    killer.join()
def test_connect_with_disconnected_node(shutdown_only):
    """Only SIGKILL-ed nodes are reported dead; graceful removals are not."""
    config = json.dumps({
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    })
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _internal_config=config)
    ray.init(address=cluster.address)
    info = relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    assert len(info) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 1, timeout=2)
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=2)
    # This node is killed by SIGTERM, ray_monitor will not mark it again.
    removing_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(removing_node, allow_graceful=True)
    with pytest.raises(RayTestTimeoutException):
        wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
    # There is no connection error to a dead node.
    info = relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)
    assert len(info) == 0
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8
    }],
    indirect=True)
@pytest.mark.parametrize("num_actors", [1, 2, 5])
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head, num_actors):
    """Actors returning large arrays in parallel all complete even though the
    object store can only hold a couple of results at a time."""
    @ray.remote
    class LargeMemoryActor(object):
        def some_expensive_task(self):
            # Each result is half the object store's configured capacity.
            return np.zeros(10**8 // 2, dtype=np.uint8)
    actors = [LargeMemoryActor.remote() for _ in range(num_actors)]
    for _ in range(10):
        pending = [a.some_expensive_task.remote() for a in actors]
        # Drain results one at a time so earlier ones can be evicted.
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 2,
        "object_store_memory": 10**8
    }],
    indirect=True)
def test_fill_object_store_exception(ray_start_cluster_head):
    """Oversized results/puts raise without killing the producing actor."""
    @ray.remote
    class LargeMemoryActor(object):
        def some_expensive_task(self):
            # Slightly larger than the whole object store.
            return np.zeros(10**8 + 2, dtype=np.uint8)
        def test(self):
            return 1
    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.skipif(
    not RAY_FORCE_DIRECT,
    reason="raylet path attempts reconstruction for evicted objects")
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_eviction(ray_start_cluster):
    """Using a freed object raises, whether fetched directly or passed on."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)
    obj = large_object.remote()
    assert (isinstance(ray.get(obj), np.ndarray))
    # Evict the object.
    ray.internal.free([obj])
    # Wait until the eviction is observed locally.
    while ray.worker.global_worker.core_worker.object_exists(obj):
        time.sleep(1)
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(obj)
    @ray.remote
    def dependent_task(x):
        return
    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.skipif(
    not RAY_FORCE_DIRECT,
    reason="raylet path attempts reconstruction for evicted objects")
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_serialized_id_eviction(ray_start_cluster):
    """An ID freed inside a task (after being received serialized in a list)
    raises UnreconstructableError on a subsequent get."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)
    @ray.remote
    def get(obj_ids):
        print("get", obj_ids)
        obj_id = obj_ids[0]
        assert (isinstance(ray.get(obj_id), np.ndarray))
        # Evict the object.
        ray.internal.free(obj_ids)
        # Wait until the eviction is observed by this worker.
        while ray.worker.global_worker.core_worker.object_exists(obj_id):
            time.sleep(1)
        with pytest.raises(ray.exceptions.UnreconstructableError):
            ray.get(obj_id)
        print("get done", obj_ids)
    obj = large_object.remote()
    ray.get(get.remote([obj]))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """Object IDs passed inside a list survive serialization into a task."""

    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_ids, test_dependent_task):
        print("get", obj_ids)
        obj_id = obj_ids[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_id)) == 1
        else:
            assert ray.get(obj_id) == 1

    # Exercise all four combinations in the same order as before:
    # task-produced object fetched directly / via a dependent task, then a
    # ray.put object fetched directly / via a dependent task.
    for use_dependent in (False, True):
        ray.get(get.remote([small_object.remote()], use_dependent))
    for use_dependent in (False, True):
        ray.get(get.remote([ray.put(1)], use_dependent))
if __name__ == "__main__":
    # Run this file's tests directly, propagating pytest's exit status.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
__init__.py | __version__ = '0.0.19'
import threading
import socket
import logging
import time
from types import SimpleNamespace

# logger for background processes (pinger and data stream)
logger = logging.getLogger('psrt')

# Connection defaults.
DEFAULT_PORT = 2873
DEFAULT_TIMEOUT = 5
BUF_SIZE = 1024
PROTO_VERSION = 1
SLEEP_STEP = 0.1  # polling granularity for interruptible sleeps

# Control-channel opcodes.
OP_NOP = b'\x00'
OP_BYE = b'\xFF'
OP_PUBLISH = b'\x01\x7F'  # priority hard-coded until supported
OP_PUBLISH_NO_ACK = b'\x21\x7F'  # priority hard-coded until supported
OP_SUBSCRIBE = b'\x02'
OP_UNSUBSCRIBE = b'\x03'

# Server response codes.
RESPONSE_OK = 0x01
RESPONSE_ACCESS_DENIED = 0xFE

# Stream headers distinguishing the control and data sockets.
CONTROL_HEADER = b'\xEE\xAA'
DATA_HEADER = b'\xEE\xAB'

# Authentication modes.
AUTH_LOGIN_PASS = b'\x00'
AUTH_KEY_AES_128_GCM = b'\x02'
AUTH_KEY_AES_256_GCM = b'\x03'
def pub_udp(target,
            topic,
            message,
            need_ack=True,
            check_ack_src=True,
            auth=AUTH_LOGIN_PASS,
            **kwargs):
    """
    Publish message with UDP frame

    Args:
        target: host:port or (host, port) tuple
        topic: topic to publish
        message: message (string, bytes or anything which can be str())
    Optional:
        * need_ack: require server acknowledge (default: True)
        * check_ack_src: check acknowledge source (host/port, default: True)
        * user: user name
        * password: password
        * timeout: socket timeout
        * auth: auth mode (password is used as AES key, str or bytes)
    Raises:
        AccessError: if the server denies access
        RuntimeError: on malformed or failed acknowledge
    """
    # Normalize target to a (host, port) tuple; resolve the host up front
    # when the ack source must be compared against it.
    if isinstance(target, str):
        host, port = target.rsplit(':', maxsplit=1)
        if check_ack_src:
            host = socket.gethostbyname(host)
        target = (host, int(port))
    elif check_ack_src:
        target = (socket.gethostbyname(target[0]), target[1])
    user = kwargs.get('user', '')
    password = kwargs.get('password', '')
    timeout = kwargs.get('timeout', DEFAULT_TIMEOUT)
    # Coerce the payload to bytes.
    if isinstance(message, bytes):
        pass
    elif isinstance(message, str):
        message = message.encode()
    else:
        message = str(message).encode()
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        if auth == AUTH_LOGIN_PASS:
            # Plain login/password frame.
            client_socket.sendto(
                CONTROL_HEADER + PROTO_VERSION.to_bytes(2, 'little') +
                AUTH_LOGIN_PASS + user.encode() + b'\x00' + password.encode() +
                b'\x00' + (OP_PUBLISH if need_ack else OP_PUBLISH_NO_ACK) +
                topic.encode() + b'\x00' + message, target)
        else:
            # AES-GCM frame: the password is the key (hex string or bytes).
            from Cryptodome import Random
            from Cryptodome.Cipher import AES
            nonce = Random.new().read(12)
            if isinstance(password, str):
                import binascii
                password = binascii.unhexlify(password)
            cipher = AES.new(password, AES.MODE_GCM, nonce)
            frame, digest = cipher.encrypt_and_digest(
                (OP_PUBLISH if need_ack else OP_PUBLISH_NO_ACK) +
                topic.encode() + b'\x00' + message)
            client_socket.sendto(
                CONTROL_HEADER + PROTO_VERSION.to_bytes(2, 'little') + auth +
                user.encode() + b'\x00' + nonce + frame + digest, target)
        if need_ack:
            client_socket.settimeout(timeout)
            (data, server) = client_socket.recvfrom(5)
            if check_ack_src and server != target:
                raise RuntimeError(f'Invalid ack source: {server}')
            if data[0:2] != CONTROL_HEADER:
                raise RuntimeError('Invalid control header in ack')
            if int.from_bytes(data[2:4], 'little') != PROTO_VERSION:
                raise RuntimeError('Invalid server protocol in ack')
            code = data[4]
            if code == RESPONSE_ACCESS_DENIED:
                raise AccessError
            elif code != RESPONSE_OK:
                # Report the response code itself (was data[0], which is part
                # of the header and therefore always the same byte).
                raise RuntimeError(f'Server error: {code}')
    finally:
        # The socket is one-shot; always release it.
        client_socket.close()
class Message:
    """Received message; mimics the paho-mqtt message object interface.

    The ``topic`` and ``payload`` attributes are assigned by the data-stream
    reader when a message arrives.
    """
    # Reported as MQTT QoS 2 for paho-mqtt compatibility.
    qos = 2
    # PSRT has no retained-message concept, so this is always False.
    retain = False
class AccessError(Exception):
    """Raised when the server denies authentication or topic access."""
    pass
def reduce_timeout(timeout, m):
    """Return *timeout* minus the time elapsed since perf-counter mark *m*.

    Raises:
        TimeoutError: if the remaining time budget is already exhausted.
    """
    remaining = timeout - (time.perf_counter() - m)
    if remaining <= 0:
        raise TimeoutError
    return remaining
class Client:
"""
PSRT client
"""
# paho mqtt compat
def tls_set(self, ca_certs, *args, **kwargs):
self.tls = True
self.tls_ca = ca_certs
def username_pw_set(self, username, password=''):
self.user = username if username is not None else ''
self.password = password if password is not None else ''
    def loop_start(self):
        """No-op; PSRT runs its own threads. Present for paho-mqtt compat."""
        pass
    def loop_stop(self):
        """No-op; PSRT runs its own threads. Present for paho-mqtt compat."""
        pass
# end compat
    def enable_logger(self):
        """No-op placeholder for paho-mqtt compatibility."""
        # TODO debug log
        pass
    def __init__(self, **kwargs):
        """
        Initialize PSRT client

        Additional properties which can be set directly to client object:

        * on_message = on_message(client, userdata, message) # message handler

        * on_connect(self, client, userdata, flags, rc) # connect handler

        (as the connection is performed in the current thread, on_connect is
        used for paho-mqtt compat. only)

        Optional:
            * path: host:port or (host, port) tuple
            * user: user name
            * password: password
            * timeout: client timeout
            * buf_size: socket and message buffer (set 100K+ for large frames)
            * userdata: anything useful
            * tls: use TLS (default: False)
            * tls_ca: path to an alternative CA file
        """
        self.path = kwargs.get('path', f'localhost:{DEFAULT_PORT}')
        self.user = kwargs.get('user', '')
        self.password = kwargs.get('password', '')
        self.timeout = kwargs.get('timeout', DEFAULT_TIMEOUT)
        self.buf_size = kwargs.get('buf_size', BUF_SIZE)
        self.userdata = kwargs.get('userdata')
        self.tls = kwargs.get('tls', False)
        self.tls_ca = kwargs.get('tls_ca')
        # Connection state: `connected` is the public flag, `_state` is the
        # internal mirror (0 = down, 1 = up).
        self.connected = False
        self._state = 0
        self.connect_event = threading.Event()
        # Serializes commands on the control socket.
        self.control_lock = threading.Lock()
        # Guards shutdown so the reader thread and user calls don't race.
        self.shutdown_lock = threading.RLock()
        # Handle of the background keep-alive (pinger) thread.
        self._h_pinger = None
        self.on_message = None
        self.on_connect = None
        self.need_data_socket = True
        self.control_socket = None
        self.data_socket = None
        # paho-mqtt alias for bye()
        self.disconnect = self.bye
def _shutdown(self, from_pinger):
self.connected = False
self._state = 0
try:
self.control_socket.close()
except:
pass
try:
self.data_socket.close()
except:
pass
if not from_pinger and self._h_pinger is not None and \
self._h_pinger.is_alive():
self._h_pinger.join()
def _handle_control_response(self, from_pinger):
response = int.from_bytes(self.control_socket.recv(1), 'little')
if response != RESPONSE_OK:
if response == RESPONSE_ACCESS_DENIED:
raise AccessError
else:
self._shutdown(from_pinger)
raise RuntimeError(f'server error {self.path}: {hex(response)}')
    def _exec_control_command(self, payload, from_pinger=False):
        """Send *payload* on the control socket and check the server reply.

        The whole exchange shares a single time budget (self.timeout),
        enforced both on acquiring the control lock and on every socket
        operation. Any failure tears the connection down before re-raising.
        """
        op_start = time.perf_counter()
        try:
            if not self.control_lock.acquire(timeout=self.timeout):
                raise TimeoutError(
                    f'client error {self.path} control lock timeout')
            try:
                self.control_socket.settimeout(
                    reduce_timeout(self.timeout, op_start))
                self.control_socket.sendall(payload)
                self.control_socket.settimeout(
                    reduce_timeout(self.timeout, op_start))
                self._handle_control_response(from_pinger)
            finally:
                self.control_lock.release()
        # bare except is deliberate here: the exception is always re-raised
        # after the connection has been shut down.
        except:
            self._shutdown(from_pinger)
            raise
    def _pinger(self):
        """Keep-alive loop: send OP_NOP roughly every timeout/2 seconds.

        Runs in a daemon thread while the client is connected; exits quietly
        if the connection was closed deliberately, otherwise logs the ping
        failure and re-raises.
        """
        while self.connected:
            try:
                self._exec_control_command(OP_NOP, True)
            except Exception as e:
                if not self.connected:
                    break
                logger.error(f'server {self.path} ping error: {e}')
                raise
            # Sleep in small steps so a disconnect is noticed promptly.
            sleep_to = time.perf_counter() + self.timeout / 2
            while time.perf_counter() < sleep_to and self.connected:
                time.sleep(SLEEP_STEP)
def connect_cluster(self, paths, randomize=True):
"""
Connect the client to PSRT cluster
If randomize parameter is set to False, the nodes are chosen in the
listed order
Args:
paths: list of node paths (host:port or tuples) or comma separated
string
Optional:
* randomize: choose random node (default: True)
Returns:
Successful node path if connected
Raises:
RuntimeError: if no nodes available
"""
if isinstance(paths, str):
paths = [x for x in [x.strip() for x in paths.split(',')] if x]
if randomize:
import random
paths = paths.copy()
random.shuffle(paths)
for p in paths:
logger.info(f'trying PSRT node {p}')
self.path = p
try:
self.connect()
logger.info(f'PSRT node connected: {p}')
return p
except Exception as e:
logger.warning(f'Failed to connect to {p}: {e}')
raise RuntimeError('no nodes available')
    def connect(self, host=None, port=DEFAULT_PORT, keepalive=None):
        """
        Connect the client

        Opens the control socket (handshake, optional TLS, login), then the
        data socket (authorized with the token returned by the server), and
        finally starts the data-stream reader and pinger threads.

        Optional:
            * host: override server host
            * port: override server port
            * keepalive: not used, for paho-mqtt compat-only
        """
        self.connect_event.clear()
        self.connected = False
        # Resolve host/port from self.path unless given explicitly.
        if host is None:
            if ':' in self.path:
                host, port = self.path.rsplit(':', maxsplit=1)
            else:
                host = self.path
                port = DEFAULT_PORT
        else:
            self.path = f'{host}:{port}'
        port = int(port)
        control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        control_socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
                                  self.buf_size)
        control_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
                                  self.buf_size)
        control_socket.settimeout(self.timeout)
        # One shared time budget for the entire connect sequence.
        op_start = time.perf_counter()
        control_socket.connect((host, port))
        control_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        control_socket.settimeout(reduce_timeout(self.timeout, op_start))
        # Greeting: control header plus a TLS-requested flag byte.
        control_socket.sendall(CONTROL_HEADER +
                               (b'\x01' if self.tls else b'\x00'))
        control_socket.settimeout(reduce_timeout(self.timeout, op_start))
        if self.tls:
            import ssl
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            context.verify_mode = ssl.CERT_REQUIRED
            if self.tls_ca:
                context.load_verify_locations(self.tls_ca)
            control_socket = context.wrap_socket(control_socket)
        header = control_socket.recv(2)
        if len(header) < 2:
            raise RuntimeError('Server connection error')
        if header != CONTROL_HEADER:
            raise RuntimeError('Invalid control header')
        control_socket.settimeout(reduce_timeout(self.timeout, op_start))
        proto = int.from_bytes(control_socket.recv(2), 'little')
        if proto != PROTO_VERSION:
            raise RuntimeError('Unsupported protocol')
        # Login: length-prefixed "user\0password" blob.
        data = self.user.encode() + b'\x00' + self.password.encode()
        control_socket.settimeout(reduce_timeout(self.timeout, op_start))
        control_socket.sendall(len(data).to_bytes(4, 'little') + data)
        try:
            control_socket.settimeout(reduce_timeout(self.timeout, op_start))
            # On success the server answers with a 32-byte session token;
            # read byte-by-byte until it is complete.
            token = control_socket.recv(32)
            if not token:
                raise AccessError
            while len(token) < 32:
                control_socket.settimeout(reduce_timeout(
                    self.timeout, op_start))
                token += control_socket.recv(1)
        except:
            raise AccessError
        self.control_socket = control_socket
        # connect data socket
        if self.need_data_socket:
            try:
                data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                data_socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
                                       self.buf_size)
                data_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
                                       self.buf_size)
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                data_socket.connect((host, port))
                data_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
                                       1)
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                data_socket.sendall(DATA_HEADER +
                                    (b'\x01' if self.tls else b'\x00'))
                if self.tls:
                    data_socket = context.wrap_socket(data_socket)
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                header = data_socket.recv(2)
                if header != DATA_HEADER:
                    raise RuntimeError('Invalid data header')
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                proto = int.from_bytes(data_socket.recv(2), 'little')
                if proto != PROTO_VERSION:
                    raise RuntimeError('Unsupported protocol')
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                # Authorize the data socket with the session token plus the
                # client timeout (one byte, whole seconds).
                data_socket.sendall(token +
                                    int(self.timeout).to_bytes(1, 'little'))
                data_socket.settimeout(reduce_timeout(self.timeout, op_start))
                response = int.from_bytes(data_socket.recv(1), 'little')
                if response != RESPONSE_OK:
                    self._shutdown(False)
                    raise RuntimeError(
                        f'server error (data socket) {self.path}: '
                        f'{hex(response)}')
                data_socket.settimeout(self.timeout)
                self.data_socket = data_socket
                threading.Thread(target=self._t_data_stream,
                                 daemon=True).start()
            except:
                # Roll both sockets back on any data-socket failure.
                try:
                    self.control_socket.close()
                except:
                    pass
                try:
                    data_socket.close()
                except:
                    pass
                raise
        # run control pinger
        self.connected = True
        self._state = 1
        self.connect_event.set()
        self._h_pinger = threading.Thread(target=self._pinger, daemon=True)
        self._h_pinger.start()
        if self.on_connect:
            self.on_connect(self, self.userdata, None, None)
    def _t_data_stream(self):
        """Background thread: receive published messages on the data socket.

        Loops forever reading frames. Each RESPONSE_OK frame is decoded
        into a Message and handed to the on_message callback. Any error
        (timeout, disconnect, malformed frame) tears the connection down
        via bye(), logging once if we were still marked connected.
        """
        try:
            while True:
                op_start = time.perf_counter()
                self.data_socket.settimeout(self.timeout)
                # 1-byte frame header; an empty read means the peer closed
                header = self.data_socket.recv(1)
                if not header:
                    raise RuntimeError(
                        f'server {self.path} data socket disconnected')
                if header[0] == RESPONSE_OK:
                    self.data_socket.settimeout(
                        reduce_timeout(self.timeout, op_start))
                    # priority byte is consumed but currently unused
                    # NOTE(review): unlike the length field below, this
                    # single-byte read is not short-read protected — confirm
                    # the server always sends it in one piece
                    priority = self.data_socket.recv(1)
                    self.data_socket.settimeout(
                        reduce_timeout(self.timeout, op_start))
                    # 4-byte little-endian payload length; loop to complete
                    # a short read one byte at a time
                    data_len_buf = self.data_socket.recv(4)
                    while len(data_len_buf) < 4:
                        self.data_socket.settimeout(
                            reduce_timeout(self.timeout, op_start))
                        data_len_buf += self.data_socket.recv(1)
                    data_len = int.from_bytes(data_len_buf, 'little')
                    data = b''
                    # accumulate the payload, capping each recv at buf_size
                    while len(data) < data_len:
                        self.data_socket.settimeout(
                            reduce_timeout(self.timeout, op_start))
                        buf_size = data_len - len(data)
                        data += self.data_socket.recv(
                            buf_size if buf_size < self.buf_size else self.
                            buf_size)
                    message = Message()
                    # payload layout: <topic bytes> NUL <message bytes>
                    topic, message.payload = data.split(b'\x00', maxsplit=1)
                    message.topic = topic.decode()
                    if self.on_message:
                        self.on_message(self, self.userdata, message)
                elif header[0] != 0:
                    raise RuntimeError(f'server {self.path} invalid data '
                                       f'in data stream: {hex(header[0])}')
        except Exception as e:
            with self.shutdown_lock:
                if self.connected:
                    logger.error(str(e))
                    self.bye()
def is_connected(self):
"""
Check is the client connected
"""
return self.connected
def subscribe(self, topic, qos=None):
"""
Subscribe to a topic
Args:
topic: topic name
Optional:
* qos: not used, for paho-mqtt compat-only
"""
data = topic.encode()
try:
self._exec_control_command(OP_SUBSCRIBE +
len(data).to_bytes(4, 'little') + data)
except AccessError:
raise AccessError(f'{self.path} topic {topic} sub access denied')
def subscribe_bulk(self, topics):
"""
Subscribe to topics
Args:
topics: topic names (list or tuple)
Optional:
* qos: not used, for paho-mqtt compat-only
"""
data = b'\x00'.join(t.encode() for t in topics)
self._exec_control_command(OP_SUBSCRIBE +
len(data).to_bytes(4, 'little') + data)
def unsubscribe(self, topic):
"""
Unsubscribe from a topic
Args:
topic: topic name
Optional:
* qos: not used, for paho-mqtt compat-only
"""
data = topic.encode()
self._exec_control_command(OP_UNSUBSCRIBE +
len(data).to_bytes(4, 'little') + data)
def unsubscribe_bulk(self, topics):
"""
Unsubscribe from topics
Args:
topics: topic names (list or tuple)
Optional:
* qos: not used, for paho-mqtt compat-only
"""
data = b'\x00'.join(t.encode() for t in topics)
self._exec_control_command(OP_UNSUBSCRIBE +
len(data).to_bytes(4, 'little') + data)
def publish(self, topic, message, qos=None, retain=None):
"""
Publish a message
Args:
topic: topic name
message: message (string, bytes or anyting which can be str())
Optional:
* qos: not used, for paho-mqtt compat-only
* retain: not used, for paho-mqtt compat-only
"""
topic = topic.encode()
if isinstance(message, bytes):
pass
elif isinstance(message, str):
message = message.encode()
else:
message = str(message).encode()
# copy code to avoid message copying
op_start = time.perf_counter()
if not self.control_lock.acquire(timeout=self.timeout):
raise TimeoutError(f'client error {self.path} control lock timeout')
try:
self.control_socket.settimeout(
reduce_timeout(self.timeout, op_start))
self.control_socket.sendall(OP_PUBLISH +
(len(topic) + len(message) +
1).to_bytes(4, 'little') + topic +
b'\x00')
self.control_socket.settimeout(
reduce_timeout(self.timeout, op_start))
self.control_socket.sendall(message)
try:
self._handle_control_response(False)
except AccessError:
raise AccessError(
f'{self.path} topic {topic} pub access denied')
finally:
self.control_lock.release()
def bye(self):
"""
End communcation
"""
with self.shutdown_lock:
if self.connected:
try:
self._exec_control_command(OP_BYE)
except:
pass
self._shutdown(False)
|
test_downloader_http.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import re
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import http_server_port, try_rm
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_http_server
from yt_dlp.downloader.http import HttpFD
from yt_dlp.utils import encodeFilename
import threading
# Directory containing this test file.
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
# Size in bytes of the payload served by the local test HTTP server.
TEST_SIZE = 10 * 1024
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    """Serves TEST_SIZE bytes of '#', with endpoints that selectively
    disable Range support and/or the Content-Length header."""

    def log_message(self, format, *args):
        # Silence per-request logging so test output stays clean.
        pass

    def send_content_range(self, total=None):
        """Emit a Content-Range header when the request carries a complete
        'bytes=start-end' Range; return the byte count to serve."""
        header_value = self.headers.get('Range')
        start = end = None
        if header_value:
            match = re.search(r'^bytes=(\d+)-(\d+)', header_value)
            if match:
                start = int(match.group(1))
                end = int(match.group(2))
        if start is None or end is None:
            # no usable range: serve everything
            return total
        content_range = 'bytes %d-%d' % (start, end)
        if total:
            content_range += '/%d' % total
        self.send_header('Content-Range', content_range)
        return end - start + 1

    def serve(self, range=True, content_length=True):
        """Answer 200 with a '#'-filled body, honoring the two switches."""
        self.send_response(200)
        self.send_header('Content-Type', 'video/mp4')
        size = self.send_content_range(TEST_SIZE) if range else TEST_SIZE
        if content_length:
            self.send_header('Content-Length', size)
        self.end_headers()
        self.wfile.write(b'#' * size)

    def do_GET(self):
        endpoints = {
            '/regular': {},
            '/no-content-length': {'content_length': False},
            '/no-range': {'range': False},
            '/no-range-no-content-length': {'range': False,
                                            'content_length': False},
        }
        # unknown paths are a test bug
        assert self.path in endpoints
        self.serve(**endpoints[self.path])
class FakeLogger(object):
    """No-op logger satisfying the YoutubeDL logger interface."""

    def debug(self, msg):
        """Discard a debug message."""

    def warning(self, msg):
        """Discard a warning message."""

    def error(self, msg):
        """Discard an error message."""
class TestHttpFD(unittest.TestCase):
    """End-to-end checks of HttpFD against a local threaded HTTP server."""

    def setUp(self):
        # Bind to an ephemeral port; serve in a daemon thread so the
        # interpreter can exit even if the server is still running.
        self.httpd = compat_http_server.HTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        self.port = http_server_port(self.httpd)
        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def download(self, params, ep):
        """Download endpoint *ep* with *params*; verify size afterwards."""
        params['logger'] = FakeLogger()
        ydl = YoutubeDL(params)
        downloader = HttpFD(ydl, params)
        filename = 'testfile.mp4'
        try_rm(encodeFilename(filename))
        self.assertTrue(downloader.real_download(filename, {
            'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
        }))
        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
        try_rm(encodeFilename(filename))

    def download_all(self, params):
        """Exercise every server endpoint with the same parameters."""
        endpoints = ('regular', 'no-content-length', 'no-range',
                     'no-range-no-content-length')
        for ep in endpoints:
            self.download(params, ep)

    def test_regular(self):
        self.download_all({})

    def test_chunked(self):
        self.download_all({'http_chunk_size': 1000})
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
client.py | import threading
import socket
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto import Random
import base64
class client():
    """Minimal chat client.

    Hands a fixed AES session key to the server encrypted under the
    server's RSA public key, then exchanges AES-CFB encrypted messages
    (base64(iv + ciphertext)) until either side sends 'END'.
    """

    def __init__(self):
        self.running = True
        self.server_host = '127.0.1.1'
        self.key = b'network security'  # 16-byte AES-128 session key

    def run(self):
        """Connect, perform the key exchange, then loop reading stdin
        and sending encrypted messages until 'END' is typed."""
        self.my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4, TCP
        self.my_socket.connect((self.server_host, 6666))  # local server on port 6666
        pubKey = self.my_socket.recv(2048)
        print(pubKey)
        pubKeyObj = RSA.importKey(pubKey)
        # 'x' is a compatibility-only argument of the legacy PyCrypto
        # encrypt() API (ignored). NOTE(review): raw RSA encryption is
        # deprecated; switch to PKCS1_OAEP when the server can change too.
        msg = pubKeyObj.encrypt(self.key, 'x')[0]
        self.my_socket.send(msg)
        print(self.key)
        threading.Thread(target=self.receive).start()  # reader thread
        while self.running:
            data = input()
            self.my_socket.send(self.encrypt(data))
            # BUG FIX: compare the plaintext command, not the encrypted
            # payload — the base64 ciphertext can never equal 'END', so
            # the client previously never shut down here.
            if data == 'END':
                self.end()

    def end(self):
        """Stop both loops and close the socket."""
        self.running = False
        self.my_socket.close()

    def receive(self):
        """Reader loop: decrypt incoming messages and print them."""
        while self.running:
            data = self.my_socket.recv(1024)
            if not data:
                # peer closed the connection; avoid decrypting b''
                self.end()
                break
            data = self.decrypt(data)
            if data in ['END', '', None]:
                self.end()
            else:
                print("server: " + data)

    def encrypt(self, msg):
        """Return base64(iv + AES-CFB ciphertext) for *msg*.

        Accepts str or bytes; str is UTF-8 encoded to match decrypt().
        """
        if isinstance(msg, str):
            msg = msg.encode('utf-8')
        iv = Random.new().read(AES.block_size)  # fresh IV per message
        cipher = AES.new(self.key, AES.MODE_CFB, iv)
        return base64.b64encode(iv + cipher.encrypt(msg))

    def decrypt(self, msg):
        """Inverse of encrypt(): strip the IV, return the UTF-8 plaintext."""
        msg = base64.b64decode(msg)
        iv = msg[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CFB, iv)
        return cipher.decrypt(msg[AES.block_size:]).decode('utf-8')
# Entry point: create a client and start the interactive session.
if __name__ == '__main__':
    my_client = client()
    my_client.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.